diff --git a/go.mod b/go.mod
index c73a0ed826f240ac281f73fb34458d304ecee9d5..fea5c0702f982d7a1dd44628e2d23ddf870a964a 100644
--- a/go.mod
+++ b/go.mod
@@ -10,7 +10,7 @@ require (
 	github.com/google/gopacket v1.1.19
 	github.com/mariomac/guara v0.0.0-20220523124851-5fc279816f1f
 	github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118
-	github.com/netobserv/flowlogs-pipeline v0.1.11-0.20231123152750-f3b03fa192aa
+	github.com/netobserv/flowlogs-pipeline v0.1.11
 	github.com/netobserv/gopipes v0.3.0
 	github.com/paulbellamy/ratecounter v0.2.0
 	github.com/segmentio/kafka-go v0.4.47
@@ -32,8 +32,10 @@ require (
 
 require (
 	github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible // indirect
+	github.com/agoda-com/opentelemetry-logs-go v0.4.3 // indirect
 	github.com/benbjohnson/clock v1.3.5 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/dustin/go-humanize v1.0.1 // indirect
@@ -42,7 +44,8 @@ require (
 	github.com/go-kit/kit v0.13.0 // indirect
 	github.com/go-kit/log v0.2.1 // indirect
 	github.com/go-logfmt/logfmt v0.5.1 // indirect
-	github.com/go-logr/logr v1.3.0 // indirect
+	github.com/go-logr/logr v1.4.1 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-openapi/jsonpointer v0.19.6 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
 	github.com/go-openapi/swag v0.22.3 // indirect
@@ -52,21 +55,21 @@ require (
 	github.com/google/gnostic-models v0.6.8 // indirect
 	github.com/google/go-cmp v0.6.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
-	github.com/google/uuid v1.4.0 // indirect
+	github.com/google/uuid v1.5.0 // indirect
 	github.com/gorilla/websocket v1.5.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
 	github.com/heptiolabs/healthcheck v0.0.0-20211123025425-613501dd5deb // indirect
 	github.com/imdario/mergo v0.3.15 // indirect
-	github.com/ip2location/ip2location-go/v9 v9.6.0 // indirect
+	github.com/ip2location/ip2location-go/v9 v9.7.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/jpillora/backoff v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.17.0 // indirect
-	github.com/klauspost/cpuid/v2 v2.2.5 // indirect
+	github.com/klauspost/compress v1.17.4 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.6 // indirect
 	github.com/libp2p/go-reuseport v0.3.0 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/minio/md5-simd v1.1.2 // indirect
-	github.com/minio/minio-go/v7 v7.0.63 // indirect
+	github.com/minio/minio-go/v7 v7.0.67 // indirect
 	github.com/minio/sha256-simd v1.0.1 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/moby/spdystream v0.2.0 // indirect
@@ -76,7 +79,7 @@ require (
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
 	github.com/netobserv/loki-client-go v0.0.0-20220927092034-f37122a54500 // indirect
-	github.com/netsampler/goflow2 v1.3.6 // indirect
+	github.com/netsampler/goflow2 v1.3.7 // indirect
 	github.com/pierrec/lz4/v4 v4.1.17 // indirect
 	github.com/pion/dtls/v2 v2.2.4 // indirect
 	github.com/pion/logging v0.2.2 // indirect
@@ -84,10 +87,10 @@ require (
 	github.com/pion/udp v0.1.4 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/prometheus/client_golang v1.17.0 // indirect
-	github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect
-	github.com/prometheus/common v0.44.0 // indirect
-	github.com/prometheus/procfs v0.11.1 // indirect
+	github.com/prometheus/client_golang v1.18.0 // indirect
+	github.com/prometheus/client_model v0.5.0 // indirect
+	github.com/prometheus/common v0.46.0 // indirect
+	github.com/prometheus/procfs v0.12.0 // indirect
 	github.com/prometheus/prometheus v1.8.2-0.20201028100903-3245b3267b24 // indirect
 	github.com/rs/xid v1.5.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
@@ -95,16 +98,28 @@ require (
 	github.com/xdg-go/pbkdf2 v1.0.0 // indirect
 	github.com/xdg-go/scram v1.1.2 // indirect
 	github.com/xdg-go/stringprep v1.0.4 // indirect
+	go.opentelemetry.io/otel v1.23.1 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.45.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.23.1 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.23.1 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.23.1 // indirect
+	go.opentelemetry.io/otel/metric v1.23.1 // indirect
+	go.opentelemetry.io/otel/sdk v1.23.1 // indirect
+	go.opentelemetry.io/otel/sdk/metric v1.23.1 // indirect
+	go.opentelemetry.io/otel/trace v1.23.1 // indirect
+	go.opentelemetry.io/proto/otlp v1.1.0 // indirect
 	go.uber.org/atomic v1.9.0 // indirect
-	golang.org/x/crypto v0.17.0 // indirect
+	golang.org/x/crypto v0.19.0 // indirect
 	golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
-	golang.org/x/net v0.19.0 // indirect
-	golang.org/x/oauth2 v0.14.0 // indirect
-	golang.org/x/term v0.15.0 // indirect
+	golang.org/x/net v0.21.0 // indirect
+	golang.org/x/oauth2 v0.16.0 // indirect
+	golang.org/x/term v0.17.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
-	golang.org/x/time v0.3.0 // indirect
+	golang.org/x/time v0.5.0 // indirect
 	google.golang.org/appengine v1.6.8 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/go.sum b/go.sum
index 3f229b3bab2b96fede08b3319066c7b7a98fe3ae..bf9e83ce8cbb47c31cca51a9d952893bce4d6578 100644
--- a/go.sum
+++ b/go.sum
@@ -77,6 +77,8 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx
 github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
 github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
 github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
+github.com/agoda-com/opentelemetry-logs-go v0.4.3 h1:dYAx/q9di+/Pv6HuGq59DFIOjqKT0LTy3PYTIz8ccq8=
+github.com/agoda-com/opentelemetry-logs-go v0.4.3/go.mod h1:gPQ0fHqroxNP2DlQFZt29/pfqGiP2m6Q5CCxEgLo6yQ=
 github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -119,6 +121,8 @@ github.com/caarlos0/env/v6 v6.10.1/go.mod h1:hvp/ryKXKipEkcuYjs9mI4bBCg+UI0Yhgm5
 github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
 github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
 github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg=
+github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
+github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -216,8 +220,12 @@ github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNV
 github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo=
 github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
 github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
@@ -398,8 +406,8 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJY
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
-github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
+github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
@@ -417,6 +425,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.15.0/go.mod h1:vO11I9oWA+KsxmfFQPhLnnIb1VDE24M+pdxZFiuZcA8=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU=
 github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
 github.com/hashicorp/consul/api v1.7.0/go.mod h1:1NSuaUUkFaJzMasbfq/11wKYWSR67Xn6r2DXKhuDNFg=
 github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
@@ -471,8 +481,8 @@ github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19y
 github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
 github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
 github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
-github.com/ip2location/ip2location-go/v9 v9.6.0 h1:do+hKM2wbVG3lXMavZzWuy0znXxJBvGc7mv0wzRVoYc=
-github.com/ip2location/ip2location-go/v9 v9.6.0/go.mod h1:MPLnsKxwQlvd2lBNcQCsLoyzJLDBFizuO67wXXdzoyI=
+github.com/ip2location/ip2location-go/v9 v9.7.0 h1:ipwl67HOWcrw+6GOChkEXcreRQR37NabqBd2ayYa4Q0=
+github.com/ip2location/ip2location-go/v9 v9.7.0/go.mod h1:MPLnsKxwQlvd2lBNcQCsLoyzJLDBFizuO67wXXdzoyI=
 github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
@@ -509,12 +519,12 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
 github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
-github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM=
-github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
+github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
 github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
-github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
+github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
 github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
 github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -565,8 +575,6 @@ github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp
 github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
 github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
-github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
 github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118 h1:2oDp6OOhLxQ9JBoUuysVz9UZ9uI6oLUbvAZu0x8o+vE=
 github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118/go.mod h1:ZFUnHIVchZ9lJoWoEGUg8Q3M4U8aNNWA3CVSUTkW4og=
 github.com/mdlayher/packet v1.0.0/go.mod h1:eE7/ctqDhoiRhQ44ko5JZU2zxB88g+JH/6jmnjzPjOU=
@@ -576,8 +584,8 @@ github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKju
 github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
 github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.63 h1:GbZ2oCvaUdgT5640WJOpyDhhDxvknAJU2/T3yurwcbQ=
-github.com/minio/minio-go/v7 v7.0.63/go.mod h1:Q6X7Qjb7WMhvG65qKf4gUgA5XaiSox74kR1uAEjxRS4=
+github.com/minio/minio-go/v7 v7.0.67 h1:BeBvZWAS+kRJm1vGTMJYVjKUNoo0FoEt/wUWdUtfmh8=
+github.com/minio/minio-go/v7 v7.0.67/go.mod h1:+UXocnUeZ3wHvVh5s95gcrA4YjMIbccT6ubB+1m054A=
 github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
 github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
@@ -620,14 +628,14 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE
 github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
 github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
 github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
-github.com/netobserv/flowlogs-pipeline v0.1.11-0.20231123152750-f3b03fa192aa h1:g09mCEph6ujskD/1EOTeJt49zqLq4zieZjpaXsGzG6s=
-github.com/netobserv/flowlogs-pipeline v0.1.11-0.20231123152750-f3b03fa192aa/go.mod h1:OZPLp6ypAR9NBsAGveBdGHcVQbuZKR2PY/JuXfng2yM=
+github.com/netobserv/flowlogs-pipeline v0.1.11 h1:9346QzgxpRenWkGAAKbYpexivTi/iU4mtSFKWR/KE+w=
+github.com/netobserv/flowlogs-pipeline v0.1.11/go.mod h1:UdsPDd7S4zVQse9dIyhPT/eYA/tMlglcig7Wii6Fixc=
 github.com/netobserv/gopipes v0.3.0 h1:IYmPnnAVCdSK7VmHmpFhrVBOEm45qpgbZmJz1sSW+60=
 github.com/netobserv/gopipes v0.3.0/go.mod h1:N7/Gz05EOF0CQQSKWsv3eof22Cj2PB08Pbttw98YFYU=
 github.com/netobserv/loki-client-go v0.0.0-20220927092034-f37122a54500 h1:RmnoJe/ci5q+QdM7upFdxiU+D8F3L3qTd5wXCwwHefw=
 github.com/netobserv/loki-client-go v0.0.0-20220927092034-f37122a54500/go.mod h1:LHXpc5tjKvsfZn0pwLKrvlgEhZcCaw3Di9mUEZGAI4E=
-github.com/netsampler/goflow2 v1.3.6 h1:fZbHDcWPcG+nkg2wGHCv4VJ9MrG8iA16YmuYhrSAEdQ=
-github.com/netsampler/goflow2 v1.3.6/go.mod h1:4UZsVGVAs//iMCptUHn3WNScztJeUhZH7kDW2+/vDdQ=
+github.com/netsampler/goflow2 v1.3.7 h1:XZaTy8kkMnGXpJ9hS3KbO1McyrFTpVNhVFEx9rNhMmc=
+github.com/netsampler/goflow2 v1.3.7/go.mod h1:4UZsVGVAs//iMCptUHn3WNScztJeUhZH7kDW2+/vDdQ=
 github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
 github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
 github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
@@ -705,16 +713,16 @@ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O
 github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
-github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
+github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
+github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM=
-github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
 github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
@@ -724,8 +732,8 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8
 github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
 github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
-github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
+github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y=
+github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -733,8 +741,8 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx
 github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
-github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
+github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
+github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
 github.com/prometheus/prometheus v1.8.2-0.20201028100903-3245b3267b24 h1:V/4Cj2GytqdqK7OMEz6c4LNjey3SNyfw3pg5jPKtJvQ=
 github.com/prometheus/prometheus v1.8.2-0.20201028100903-3245b3267b24/go.mod h1:MDRkz271loM/PrYN+wUNEaTMDGSP760MQzB0yEjdgSQ=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
@@ -751,7 +759,7 @@ github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/sagikazarmark/locafero v0.3.0 h1:zT7VEGWC2DTflmccN/5T1etyKvxSxpHsjb9cJvm4SvQ=
+github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
 github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
 github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
 github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
@@ -780,16 +788,16 @@ github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJ
 github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY=
+github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
 github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
+github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.17.0 h1:I5txKw7MJasPL/BrfkbA0Jyo/oELqVmux4pR/UxOMfI=
+github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
 github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
 github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
 github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
@@ -861,13 +869,35 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
 go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opentelemetry.io/otel v1.23.1 h1:Za4UzOqJYS+MUczKI320AtqZHZb7EqxO00jAHE0jmQY=
+go.opentelemetry.io/otel v1.23.1/go.mod h1:Td0134eafDLcTS4y+zQ26GE8u3dEuRBiBCTUIRHaikA=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.45.0 h1:tfil6di0PoNV7FZdsCS7A5izZoVVQ7AuXtyekbOpG/I=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.45.0/go.mod h1:AKFZIEPOnqB00P63bTjOiah4ZTaRzl1TKwUWpZdYUHI=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0 h1:bflGWrfYyuulcdxf14V6n9+CoQcu5SAAdHmDPAJnlps=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0/go.mod h1:qcTO4xHAxZLaLxPd60TdE88rxtItPHgHWqOhOGRr0as=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.23.1 h1:o8iWeVFa1BcLtVEV0LzrCxV2/55tB3xLxADr6Kyoey4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.23.1/go.mod h1:SEVfdK4IoBnbT2FXNM/k8yC08MrfbhWk3U4ljM8B3HE=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.23.1 h1:p3A5+f5l9e/kuEBwLOrnpkIDHQFlHmbiVxMURWRK6gQ=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.23.1/go.mod h1:OClrnXUjBqQbInvjJFjYSnMxBSCXBF8r3b34WqjiIrQ=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.23.1 h1:cfuy3bXmLJS7M1RZmAL6SuhGtKUp2KEsrm00OlAXkq4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.23.1/go.mod h1:22jr92C6KwlwItJmQzfixzQM3oyyuYLCfHiMY+rpsPU=
+go.opentelemetry.io/otel/metric v1.23.1 h1:PQJmqJ9u2QaJLBOELl1cxIdPcpbwzbkjfEyelTl2rlo=
+go.opentelemetry.io/otel/metric v1.23.1/go.mod h1:mpG2QPlAfnK8yNhNJAxDZruU9Y1/HubbC+KyH8FaCWI=
+go.opentelemetry.io/otel/sdk v1.23.1 h1:O7JmZw0h76if63LQdsBMKQDWNb5oEcOThG9IrxscV+E=
+go.opentelemetry.io/otel/sdk v1.23.1/go.mod h1:LzdEVR5am1uKOOwfBWFef2DCi1nu3SA8XQxx2IerWFk=
+go.opentelemetry.io/otel/sdk/metric v1.23.1 h1:T9/8WsYg+ZqIpMWwdISVVrlGb/N0Jr1OHjR/alpKwzg=
+go.opentelemetry.io/otel/sdk/metric v1.23.1/go.mod h1:8WX6WnNtHCgUruJ4TJ+UssQjMtpxkpX0zveQC8JG/E0=
+go.opentelemetry.io/otel/trace v1.23.1 h1:4LrmmEd8AU2rFvU1zegmvqW7+kWarxtNOPyeL6HmYY8=
+go.opentelemetry.io/otel/trace v1.23.1/go.mod h1:4IpnpJFwr1mo/6HL8XIPJaE9y0+u1KcVmuW7dwFSVrI=
+go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI=
+go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY=
 go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
 go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
-go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
 go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
 go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
@@ -896,8 +926,8 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
 golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
-golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
-golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
 golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -985,8 +1015,8 @@ golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
 golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
-golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
-golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
+golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -994,8 +1024,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr
 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0=
-golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM=
+golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
+golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1096,8 +1126,8 @@ golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
 golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
 golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
 golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
-golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
-golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
+golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1118,8 +1148,8 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
-golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
+golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1261,8 +1291,11 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D
 google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA=
+google.golang.org/genproto v0.0.0-20231212172506-995d672761c0 h1:YJ5pD9rF8o9Qtta0Cmy9rdBwkSjrTCT6XTiUQVOtIos=
+google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 h1:rcS6EyEaoCO52hQDupoSfrxI3R6C2Tq741is7X8OvnM=
+google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917/go.mod h1:CmlNWB9lSezaYELKS5Ym1r44VrrbPUa7JTvw+6MbpJ0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 h1:6G8oQ016D88m1xAKljMlBOOGWDZkes4kMhgGFlf8WcQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917/go.mod h1:xtjpI3tXFPP051KaWnhvxkiubL/6dJ18vLVf7q2pTOU=
 google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/.gitignore b/vendor/github.com/agoda-com/opentelemetry-logs-go/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..ef8fdbd5fdec057dae6adb9f71f488abaaf1cffe
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/.gitignore
@@ -0,0 +1,28 @@
+# If you prefer the allow list template instead of the deny list, see community template:
+# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
+#
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+coverage.html
+coverage.txt
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
+
+# Go workspace file
+go.work
+
+# Intellij
+*.iml
+.idea/
+
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/CHANGELOG.md b/vendor/github.com/agoda-com/opentelemetry-logs-go/CHANGELOG.md
new file mode 100644
index 0000000000000000000000000000000000000000..14cd47cff84c5b5bf862e2c554fa6503337c2eae
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/CHANGELOG.md
@@ -0,0 +1,92 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
+
+This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [v0.4.3] 2023-11-02
+
+### Fixed
+
+- fix: race on batch processing (#30)
+
+## [v0.4.2] 2023-10-30
+
+### Fixed
+
+- fix: accept any 2xx status code in otlplogshttp client (#26)
+- fix: show the error body when status code is unknown (#27)
+- fix: grpc rapid-reset vulnerability (#28)
+
+## [v0.4.1] 2023-10-13
+
+### Fixed
+
+- autoconfiguration always emitted an error message on initialization (#23)
+- fix variable and private method names (#22)
+- merge the logRecord resources with those provided by the logProvider (#21)
+
+## [v0.4.0] 2023-10-02
+
+### Changed
+
+- opentelemetry updated to 1.19.0
+- drop compatibility guarantee of Go [1.19](https://go.dev/doc/go1.19)
+
+## [v0.3.0] 2023-09-13
+
+### Changed
+
+- opentelemetry updated to 1.18.0
+
+### Fixed
+
+- stdoutlogs writer parameter was ignored
+
+## [v0.2.0] 2023-08-30
+
+### Changed
+
+- opentelemetry updated to 1.17.0
+- `github.com/golang/protobuf/proto` replaced with `google.golang.org/protobuf`
+- `otlp/internal` package moved to `otlp/otlplogs/internal`
+- more unit tests added
+
+## [v0.1.2] 2023-08-05
+
+### Fixed
+
+- reverted to all-in-one package
+- inconsistent v0.1.0 go package
+
+## [v0.1.0] 2023-08-05
+
+### Added
+
+- otlplogsgrpc exporter with `grpc` protocol
+- `http/json` protocol supported in otlplogshttp exporter
+- `stdout` logs logger
+- Package split into separate `otel`, `sdk`, `exporters/otlp/otlplogs` and `exporters/stdout/stdoutlogs` packages
+- `OTEL_EXPORTER_OTLP_PROTOCOL` env variable to configure `grpc`, `http/protobuf` and `http/json` otlp formats with OTEL
+  logs exporter
+- `autoconfigure` sdk package with `OTEL_LOGS_EXPORTER` env variable support with `none`, `otlp` and `logging` options to
+  autoconfigure the logger provider
+
+## [v0.0.1] 2023-07-25
+
+### Added
+
+- implementation of [Logs Bridge API](https://opentelemetry.io/docs/specs/otel/logs/bridge-api) with Stable API and SDK
+  API interfaces.
+- Package all-in-one for logs `github.com/agoda-com/opentelemetry-logs-go`
+- Package module `semconv`
+  with [Logs Exceptions Semantic Conventions](https://opentelemetry.io/docs/specs/otel/logs/semantic_conventions/exceptions/#attributes)
+- Package module `logs`
+  with [Stable Log Model](https://opentelemetry.io/docs/specs/otel/logs/data-model), [Logger](https://opentelemetry.io/docs/specs/otel/logs/bridge-api/#logger)
+  and [LoggerProvider](https://opentelemetry.io/docs/specs/otel/logs/bridge-api/#loggerprovider) interfaces
+- Package module `sdk` with [Logger SDK](https://opentelemetry.io/docs/specs/otel/logs/sdk/) implementation
+- Package module `exporters`
+  with [Built-in processors](https://opentelemetry.io/docs/specs/otel/logs/sdk/#built-in-processors), `otlp` interface
+  and `noop` and `http/protobuf` exporters
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/CODEOWNERS b/vendor/github.com/agoda-com/opentelemetry-logs-go/CODEOWNERS
new file mode 100644
index 0000000000000000000000000000000000000000..9f8212f9945c128d3307d4799d6e237d46c7013a
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/CODEOWNERS
@@ -0,0 +1,13 @@
+#####################################################
+#
+# List of approvers for this repository
+#
+#####################################################
+#
+# Learn about CODEOWNERS file format:
+#  https://help.github.com/en/articles/about-code-owners
+#
+
+* @chameleon82
+
+CODEOWNERS @chameleon82
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/agoda-com/opentelemetry-logs-go/LICENSE
similarity index 99%
rename from vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
rename to vendor/github.com/agoda-com/opentelemetry-logs-go/LICENSE
index 8dada3edaf50dbc082c9a125058f25def75e625a..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 100644
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/LICENSE
@@ -178,7 +178,7 @@
    APPENDIX: How to apply the Apache License to your work.
 
       To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "{}"
+      boilerplate notice, with the fields enclosed by brackets "[]"
       replaced with your own identifying information. (Don't include
       the brackets!)  The text should be enclosed in the appropriate
       comment syntax for the file format. We also recommend that a
@@ -186,7 +186,7 @@
       same "printed page" as the copyright notice for easier
       identification within third-party archives.
 
-   Copyright {yyyy} {name of copyright owner}
+   Copyright [yyyy] [name of copyright owner]
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/Makefile b/vendor/github.com/agoda-com/opentelemetry-logs-go/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..df59b790c3ff2404f8244d787beb286081632b9a
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/Makefile
@@ -0,0 +1,9 @@
+.PHONY: test-coverage
+
+test-coverage:
+	go test -coverprofile=coverage.out ./...
+	go tool cover -html=coverage.out -o coverage.html
+
+.PHONY: test-race
+test-race:
+	go test -race ./...
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/README.md b/vendor/github.com/agoda-com/opentelemetry-logs-go/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..b2e38fc6d7fb5a902fccadb2916900052872f066
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/README.md
@@ -0,0 +1,117 @@
+# OpenTelemetry-Logs-Go
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/agoda-com/opentelemetry-logs-go.svg)](https://pkg.go.dev/github.com/agoda-com/opentelemetry-logs-go)
+[![codecov](https://codecov.io/github/agoda-com/opentelemetry-logs-go/graph/badge.svg?token=F1NW0R0W75)](https://codecov.io/github/agoda-com/opentelemetry-logs-go)
+
+OpenTelemetry-Logs-Go is the [Go](https://golang.org) implementation of [OpenTelemetry](https://opentelemetry.io/) Logs.
+It provides an API to send logging data directly to observability platforms. It extends the official
+[open-telemetry/opentelemetry-go](https://github.com/open-telemetry/opentelemetry-go) with Logs support.
+
+## Project Life Cycle
+
+This project was created because of the log module freeze in the
+official [opentelemetry-go](https://github.com/open-telemetry/opentelemetry-go) repository:
+
+```
+The Logs signal development is halted for this project while we stabilize the Metrics SDK.
+No Logs Pull Requests are currently being accepted.
+```
+
+This project will be deprecated once the Logs module in the official
+[opentelemetry-go](https://github.com/open-telemetry/opentelemetry-go) repository reaches "Stable" status.
+
+## Project packages
+
+| Packages                         | Description                                                                |
+|----------------------------------|----------------------------------------------------------------------------|
+| [autoconfigure](./autoconfigure) | Autoconfiguration SDK. Allows configuring log exporters via env variables  |
+| [sdk](./sdk)                     | OpenTelemetry Logs SDK                                                     |
+| [exporters/otlp](./exporters)    | OTLP format exporter                                                       |
+| [exporters/stdout](./exporters)  | Console exporter                                                           |
+
+## Quick start
+
+This is an implementation of the [Logs Bridge API](https://opentelemetry.io/docs/specs/otel/logs/bridge-api/) and is not
+intended to be used by developers directly. It is provided for logging library authors to build log appenders, which use
+this API to bridge between existing logging libraries and the OpenTelemetry log data model.
+
+The example below shows how a logging library can be instrumented with this API:
+
+```go
+package myInstrumentedLogger
+
+import (
+	otel "github.com/agoda-com/opentelemetry-logs-go"
+	"github.com/agoda-com/opentelemetry-logs-go/logs"
+	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+	"go.uber.org/zap/zapcore"
+)
+
+const (
+	instrumentationName    = "otel/zap"
+	instrumentationVersion = "0.0.1"
+)
+
+var (
+	logger = otel.GetLoggerProvider().Logger(
+		instrumentationName,
+		logs.WithInstrumentationVersion(instrumentationVersion),
+		logs.WithSchemaURL(semconv.SchemaURL),
+	)
+)
+
+func (c otlpCore) Write(ent zapcore.Entry, fields []zapcore.Field) error {
+	lrc := logs.LogRecordConfig{
+		Body: &ent.Message,
+		...
+	}
+	logRecord := logs.NewLogRecord(lrc)
+	logger.Emit(logRecord)
+	return nil
+}
+```
+
+and the application initialization code:
+
+```go
+package main
+
+import (
+	"os"
+	"context"
+	"github.com/agoda-com/opentelemetry-logs-go"
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs"
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp"
+	"go.opentelemetry.io/otel/sdk/resource"
+	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+	sdk "github.com/agoda-com/opentelemetry-logs-go/sdk/logs"
+)
+
+func newResource() *resource.Resource {
+	host, _ := os.Hostname()
+	return resource.NewWithAttributes(
+		semconv.SchemaURL,
+		semconv.ServiceName("otlplogs-example"),
+		semconv.ServiceVersion("0.0.1"),
+		semconv.HostName(host),
+	)
+}
+
+func main() {
+	ctx := context.Background()
+
+	exporter, _ := otlplogs.NewExporter(ctx, otlplogs.WithClient(otlplogshttp.NewClient()))
+	loggerProvider := sdk.NewLoggerProvider(
+		sdk.WithBatcher(exporter),
+		sdk.WithResource(newResource()),
+	)
+	otel.SetLoggerProvider(loggerProvider)
+
+	myInstrumentedLogger.Info("Hello OpenTelemetry")
+}
+```
+
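+To avoid losing buffered records on exit, flush the provider before the
+process terminates. A minimal sketch (assuming the SDK's `LoggerProvider`
+exposes a `Shutdown` method, as the trace SDK does):
+
+```go
+// Flush any batched log records before main returns; the error is
+// ignored here for brevity.
+defer func() { _ = loggerProvider.Shutdown(ctx) }()
+```
+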
+## References
+
+Logger Bridge API implementations for `zap`, `slog`, `zerolog`, and other
+loggers can be found at https://github.com/agoda-com/opentelemetry-go.
+
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/clients.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/clients.go
new file mode 100644
index 0000000000000000000000000000000000000000..11886f1ed7e11e74b8e1ce8a453fa15237a97261
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/clients.go
@@ -0,0 +1,50 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package otlplogs
+
+import (
+	"context"
+	logspb "go.opentelemetry.io/proto/otlp/logs/v1"
+)
+
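+// Client provides the transport used by the Exporter to deliver log records
+// to an OTLP receiver. The Exporter manages the lifecycle: Start is called
+// once before any upload and Stop once on shutdown, while UploadLogs may be
+// called concurrently in between.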
+type Client interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Start should establish connection(s) to endpoint(s). It is
+	// called just once by the exporter, so the implementation
+	// does not need to worry about idempotence and locking.
+	Start(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Stop should close the connections. The function is called
+	// only once by the exporter, so the implementation does not
+	// need to worry about idempotence, but it may be called
+	// concurrently with UploadLogs, so proper
+	// locking is required. The function serves as a
+	// synchronization point - after the function returns, the
+	// process of closing connections is assumed to be finished.
+	Stop(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// UploadLogs should transform the passed logs to the wire
+	// format and send them to the collector. It may be called
+	// concurrently.
+	UploadLogs(ctx context.Context, protoLogs []*logspb.ResourceLogs) error
+}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/exporter.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/exporter.go
new file mode 100644
index 0000000000000000000000000000000000000000..8b59295adbeb51434aab2e9a9fba28bf0e246b7b
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/exporter.go
@@ -0,0 +1,112 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package otlplogs
+
+import (
+	"context"
+	"errors"
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/logstransform"
+	logssdk "github.com/agoda-com/opentelemetry-logs-go/sdk/logs"
+	"sync"
+)
+
+var (
+	errAlreadyStarted = errors.New("already started")
+)
+
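+// Exporter exports log records to a collector by delegating to its Client.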
+type Exporter struct {
+	client Client
+
+	mu      sync.RWMutex
+	started bool
+
+	startOnce sync.Once
+	stopOnce  sync.Once
+}
+
+// Start establishes a connection to the receiving endpoint.
+func (e *Exporter) Start(ctx context.Context) error {
+	var err = errAlreadyStarted
+	e.startOnce.Do(func() {
+		e.mu.Lock()
+		e.started = true
+		e.mu.Unlock()
+		err = e.client.Start(ctx)
+	})
+
+	return err
+}
+
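+// Shutdown stops the exporter's client. It returns nil without side effects
+// if the exporter was never started, and runs the stop logic at most once.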
+func (e *Exporter) Shutdown(ctx context.Context) error {
+	e.mu.RLock()
+	started := e.started
+	e.mu.RUnlock()
+
+	if !started {
+		return nil
+	}
+
+	var err error
+
+	e.stopOnce.Do(func() {
+		err = e.client.Stop(ctx)
+		e.mu.Lock()
+		e.started = false
+		e.mu.Unlock()
+	})
+
+	return err
+}
+
+// Export exports a batch of logs.
+func (e *Exporter) Export(ctx context.Context, ll []logssdk.ReadableLogRecord) error {
+	protoLogs := logstransform.Logs(ll)
+	if len(protoLogs) == 0 {
+		return nil
+	}
+
+	err := e.client.UploadLogs(ctx, protoLogs)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// New creates a new Exporter with the given client.
+//
+// Deprecated: Use NewExporter instead. Will be removed in v0.1.0
+func New(ctx context.Context, client Client) (*Exporter, error) {
+	return NewExporter(ctx, WithClient(client))
+}
+
+// NewExporter creates and starts a new Exporter.
+func NewExporter(ctx context.Context, options ...ExporterOption) (*Exporter, error) {
+	// Create new client using env variables
+	config := NewExporterConfig(options...)
+
+	for _, opt := range options {
+		config = opt.apply(config)
+	}
+
+	exp := &Exporter{
+		client: config.client,
+	}
+
+	if err := exp.Start(ctx); err != nil {
+		return nil, err
+	}
+	return exp, nil
+}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/envconfig/envconfig.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/envconfig/envconfig.go
new file mode 100644
index 0000000000000000000000000000000000000000..36009a6bafea9913340781c929c4cfb0a9c13238
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/envconfig/envconfig.go
@@ -0,0 +1,199 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package envconfig
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/agoda-com/opentelemetry-logs-go/internal/global"
+)
+
+// ConfigFn is the generic function used to set a config.
+type ConfigFn func(*EnvOptionsReader)
+
+// EnvOptionsReader reads the required environment variables.
+type EnvOptionsReader struct {
+	GetEnv    func(string) string
+	ReadFile  func(string) ([]byte, error)
+	Namespace string
+}
+
+// Apply runs every ConfigFn.
+func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
+	for _, o := range opts {
+		o(e)
+	}
+}
+
+// GetEnvValue gets an OTLP environment variable value of the specified key
+// using the GetEnv function.
+// This function prepends the OTLP specified namespace to all key lookups.
+func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
+	v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
+	return v, v != ""
+}
+
+// WithString retrieves the specified config and passes it to ConfigFn as a string.
+func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			fn(v)
+		}
+	}
+}
+
+// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
+func WithBool(n string, fn func(bool)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			b := strings.ToLower(v) == "true"
+			fn(b)
+		}
+	}
+}
+
+// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
+func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			d, err := strconv.Atoi(v)
+			if err != nil {
+				global.Error(err, "parse duration", "input", v)
+				return
+			}
+			fn(time.Duration(d) * time.Millisecond)
+		}
+	}
+}
+
+// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
+func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			fn(stringToHeader(v))
+		}
+	}
+}
+
+// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
+func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			u, err := url.Parse(v)
+			if err != nil {
+				global.Error(err, "parse url", "input", v)
+				return
+			}
+			fn(u)
+		}
+	}
+}
+
+// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and passed to fn.
+func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			b, err := e.ReadFile(v)
+			if err != nil {
+				global.Error(err, "read tls ca cert file", "file", v)
+				return
+			}
+			c, err := createCertPool(b)
+			if err != nil {
+				global.Error(err, "create tls cert pool")
+				return
+			}
+			fn(c)
+		}
+	}
+}
+
+// WithClientCert returns a ConfigFn that reads the environment variables nc and nk as filepaths to a client certificate and key pair. If both exist, they are parsed as a crypto/tls.Certificate and passed to fn.
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		vc, okc := e.GetEnvValue(nc)
+		vk, okk := e.GetEnvValue(nk)
+		if !okc || !okk {
+			return
+		}
+		cert, err := e.ReadFile(vc)
+		if err != nil {
+			global.Error(err, "read tls client cert", "file", vc)
+			return
+		}
+		key, err := e.ReadFile(vk)
+		if err != nil {
+			global.Error(err, "read tls client key", "file", vk)
+			return
+		}
+		crt, err := tls.X509KeyPair(cert, key)
+		if err != nil {
+			global.Error(err, "create tls client key pair")
+			return
+		}
+		fn(crt)
+	}
+}
+
+func keyWithNamespace(ns, key string) string {
+	if ns == "" {
+		return key
+	}
+	return fmt.Sprintf("%s_%s", ns, key)
+}
+
+func stringToHeader(value string) map[string]string {
+	headersPairs := strings.Split(value, ",")
+	headers := make(map[string]string)
+
+	for _, header := range headersPairs {
+		n, v, found := strings.Cut(header, "=")
+		if !found {
+			global.Error(errors.New("missing '='"), "parse headers", "input", header)
+			continue
+		}
+		name, err := url.QueryUnescape(n)
+		if err != nil {
+			global.Error(err, "unescape header key", "key", n)
+			continue
+		}
+		trimmedName := strings.TrimSpace(name)
+		value, err := url.QueryUnescape(v)
+		if err != nil {
+			global.Error(err, "unescape header value", "value", v)
+			continue
+		}
+		trimmedValue := strings.TrimSpace(value)
+
+		headers[trimmedName] = trimmedValue
+	}
+
+	return headers
+}
+
+func createCertPool(certBytes []byte) (*x509.CertPool, error) {
+	cp := x509.NewCertPool()
+	if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+		return nil, errors.New("failed to append certificate to the cert pool")
+	}
+	return cp, nil
+}
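+
+// Illustrative sketch (not part of this package's API surface): a reader wired
+// to the process environment resolves namespaced keys, so with
+// OTEL_EXPORTER_OTLP_TIMEOUT=5000 set, WithDuration below receives 5s. The
+// wiring here is hypothetical; the option helpers are the ones defined above.
+//
+//	reader := EnvOptionsReader{
+//		GetEnv:    os.Getenv,
+//		ReadFile:  os.ReadFile,
+//		Namespace: "OTEL_EXPORTER_OTLP",
+//	}
+//	var timeout time.Duration
+//	reader.Apply(
+//		// looks up OTEL_EXPORTER_OTLP_TIMEOUT, parsed as milliseconds
+//		WithDuration("TIMEOUT", func(d time.Duration) { timeout = d }),
+//	)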
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/logstransform/attribute.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/logstransform/attribute.go
new file mode 100644
index 0000000000000000000000000000000000000000..04b78fcae6378e130b02fbe785d2bf7929dfa01d
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/logstransform/attribute.go
@@ -0,0 +1,160 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logstransform
+
+import (
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/resource"
+	commonpb "go.opentelemetry.io/proto/otlp/common/v1"
+)
+
+// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
+func KeyValues(attrs []attribute.KeyValue) []*commonpb.KeyValue {
+	if len(attrs) == 0 {
+		return nil
+	}
+
+	out := make([]*commonpb.KeyValue, 0, len(attrs))
+	for _, kv := range attrs {
+		out = append(out, KeyValue(kv))
+	}
+	return out
+}
+
+// Iterator transforms an attribute iterator into OTLP key-values.
+func Iterator(iter attribute.Iterator) []*commonpb.KeyValue {
+	l := iter.Len()
+	if l == 0 {
+		return nil
+	}
+
+	out := make([]*commonpb.KeyValue, 0, l)
+	for iter.Next() {
+		out = append(out, KeyValue(iter.Attribute()))
+	}
+	return out
+}
+
+// ResourceAttributes transforms a Resource into OTLP key-values.
+func ResourceAttributes(res *resource.Resource) []*commonpb.KeyValue {
+	return Iterator(res.Iter())
+}
+
+// KeyValue transforms an attribute KeyValue into an OTLP key-value.
+func KeyValue(kv attribute.KeyValue) *commonpb.KeyValue {
+	return &commonpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
+}
+
+// Value transforms an attribute Value into an OTLP AnyValue.
+func Value(v attribute.Value) *commonpb.AnyValue {
+	av := new(commonpb.AnyValue)
+	switch v.Type() {
+	case attribute.BOOL:
+		av.Value = &commonpb.AnyValue_BoolValue{
+			BoolValue: v.AsBool(),
+		}
+	case attribute.BOOLSLICE:
+		av.Value = &commonpb.AnyValue_ArrayValue{
+			ArrayValue: &commonpb.ArrayValue{
+				Values: boolSliceValues(v.AsBoolSlice()),
+			},
+		}
+	case attribute.INT64:
+		av.Value = &commonpb.AnyValue_IntValue{
+			IntValue: v.AsInt64(),
+		}
+	case attribute.INT64SLICE:
+		av.Value = &commonpb.AnyValue_ArrayValue{
+			ArrayValue: &commonpb.ArrayValue{
+				Values: int64SliceValues(v.AsInt64Slice()),
+			},
+		}
+	case attribute.FLOAT64:
+		av.Value = &commonpb.AnyValue_DoubleValue{
+			DoubleValue: v.AsFloat64(),
+		}
+	case attribute.FLOAT64SLICE:
+		av.Value = &commonpb.AnyValue_ArrayValue{
+			ArrayValue: &commonpb.ArrayValue{
+				Values: float64SliceValues(v.AsFloat64Slice()),
+			},
+		}
+	case attribute.STRING:
+		av.Value = &commonpb.AnyValue_StringValue{
+			StringValue: v.AsString(),
+		}
+	case attribute.STRINGSLICE:
+		av.Value = &commonpb.AnyValue_ArrayValue{
+			ArrayValue: &commonpb.ArrayValue{
+				Values: stringSliceValues(v.AsStringSlice()),
+			},
+		}
+	default:
+		av.Value = &commonpb.AnyValue_StringValue{
+			StringValue: "INVALID",
+		}
+	}
+	return av
+}
+
+func boolSliceValues(vals []bool) []*commonpb.AnyValue {
+	converted := make([]*commonpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &commonpb.AnyValue{
+			Value: &commonpb.AnyValue_BoolValue{
+				BoolValue: v,
+			},
+		}
+	}
+	return converted
+}
+
+func int64SliceValues(vals []int64) []*commonpb.AnyValue {
+	converted := make([]*commonpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &commonpb.AnyValue{
+			Value: &commonpb.AnyValue_IntValue{
+				IntValue: v,
+			},
+		}
+	}
+	return converted
+}
+
+func float64SliceValues(vals []float64) []*commonpb.AnyValue {
+	converted := make([]*commonpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &commonpb.AnyValue{
+			Value: &commonpb.AnyValue_DoubleValue{
+				DoubleValue: v,
+			},
+		}
+	}
+	return converted
+}
+
+func stringSliceValues(vals []string) []*commonpb.AnyValue {
+	converted := make([]*commonpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &commonpb.AnyValue{
+			Value: &commonpb.AnyValue_StringValue{
+				StringValue: v,
+			},
+		}
+	}
+	return converted
+}
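+
+// Illustrative sketch: transforming SDK attributes into their OTLP wire form.
+// The attribute keys and values are hypothetical.
+//
+//	attrs := []attribute.KeyValue{
+//		attribute.String("service.name", "demo"),
+//		attribute.Int64("retry.count", 3),
+//	}
+//	kvs := KeyValues(attrs)
+//	// kvs[1].Value is an *commonpb.AnyValue holding an IntValue of 3.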
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/logstransform/logs.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/logstransform/logs.go
new file mode 100644
index 0000000000000000000000000000000000000000..8dc278f224bda179607810198b5dd824c2ca1554
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/logstransform/logs.go
@@ -0,0 +1,126 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logstransform
+
+import (
+	sdk "github.com/agoda-com/opentelemetry-logs-go/sdk/logs"
+	commonpb "go.opentelemetry.io/proto/otlp/common/v1"
+	logspb "go.opentelemetry.io/proto/otlp/logs/v1"
+	resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
+	"time"
+)
+
+// Logs transforms OpenTelemetry LogRecords into OTLP ResourceLogs.
+func Logs(sdl []sdk.ReadableLogRecord) []*logspb.ResourceLogs {
+
+	var resourceLogs []*logspb.ResourceLogs
+
+	for _, sd := range sdl {
+
+		lr := logRecord(sd)
+
+		var is *commonpb.InstrumentationScope
+		var schemaURL = ""
+		if sd.InstrumentationScope() != nil {
+			is = &commonpb.InstrumentationScope{
+				Name:    sd.InstrumentationScope().Name,
+				Version: sd.InstrumentationScope().Version,
+			}
+			schemaURL = sd.InstrumentationScope().SchemaURL
+		}
+
+		// Create a log resource
+		resourceLog := &logspb.ResourceLogs{
+			Resource: &resourcepb.Resource{
+				Attributes: KeyValues(sd.Resource().Attributes()),
+			},
+			// provide a resource description if available
+			ScopeLogs: []*logspb.ScopeLogs{
+				{
+					Scope:      is,
+					SchemaUrl:  schemaURL,
+					LogRecords: []*logspb.LogRecord{lr},
+				},
+			},
+		}
+
+		resourceLogs = append(resourceLogs, resourceLog)
+	}
+
+	return resourceLogs
+}
+
+func logRecord(record sdk.ReadableLogRecord) *logspb.LogRecord {
+	var body *commonpb.AnyValue
+	if record.Body() != nil {
+		body = &commonpb.AnyValue{
+			Value: &commonpb.AnyValue_StringValue{
+				StringValue: *record.Body(),
+			},
+		}
+	}
+
+	var traceIDBytes []byte
+	if record.TraceId() != nil {
+		tid := *record.TraceId()
+		traceIDBytes = tid[:]
+	}
+	var spanIDBytes []byte
+	if record.SpanId() != nil {
+		sid := *record.SpanId()
+		spanIDBytes = sid[:]
+	}
+	var traceFlags byte
+	if record.TraceFlags() != nil {
+		tf := *record.TraceFlags()
+		traceFlags = byte(tf)
+	}
+	var ts time.Time
+	if record.Timestamp() != nil {
+		ts = *record.Timestamp()
+	} else {
+		ts = record.ObservedTimestamp()
+	}
+
+	var kv []*commonpb.KeyValue
+	if record.Attributes() != nil {
+		kv = KeyValues(*record.Attributes())
+	}
+
+	var st = ""
+	if record.SeverityText() != nil {
+		st = *record.SeverityText()
+	}
+
+	var sn = logspb.SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED
+	if record.SeverityNumber() != nil {
+		sn = logspb.SeverityNumber(*record.SeverityNumber())
+	}
+
+	logRecord := &logspb.LogRecord{
+		TimeUnixNano:         uint64(ts.UnixNano()),
+		ObservedTimeUnixNano: uint64(record.ObservedTimestamp().UnixNano()),
+		TraceId:              traceIDBytes,       // provide the associated trace ID if available
+		SpanId:               spanIDBytes,        // provide the associated span ID if available
+		Flags:                uint32(traceFlags), // provide the associated trace flags
+		Body:                 body,               // provide the associated log body if available
+		Attributes:           kv,                 // provide additional log attributes if available
+		SeverityText:         st,
+		SeverityNumber:       sn,
+	}
+	return logRecord
+}
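+
+// Illustrative note: each ReadableLogRecord maps to its own ResourceLogs entry
+// with a single ScopeLogs and LogRecord, so a batch of n records yields n
+// entries. A minimal usage sketch, assuming records came from the SDK and
+// collogspb aliases go.opentelemetry.io/proto/otlp/collector/logs/v1:
+//
+//	protoLogs := Logs(records)
+//	req := &collogspb.ExportLogsServiceRequest{ResourceLogs: protoLogs}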
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/envconfig.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/envconfig.go
new file mode 100644
index 0000000000000000000000000000000000000000..0827da0c656aeedd2387e47a71cfe88579c5f454
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/envconfig.go
@@ -0,0 +1,192 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package otlpconfig
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/envconfig"
+	"net/url"
+	"os"
+	"path"
+	"strings"
+	"time"
+)
+
+// DefaultEnvOptionsReader is the default environment options reader.
+var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
+	GetEnv:    os.Getenv,
+	ReadFile:  os.ReadFile,
+	Namespace: "OTEL_EXPORTER_OTLP",
+}
+
+func stringToProtocol(u string) Protocol {
+	switch strings.ToLower(u) {
+	case string(ExporterProtocolGrpc):
+		return ExporterProtocolGrpc
+	case string(ExporterProtocolHttpProtobuf):
+		return ExporterProtocolHttpProtobuf
+	case string(ExporterProtocolHttpJson):
+		return ExporterProtocolHttpJson
+	default:
+		return ExporterProtocolHttpProtobuf
+	}
+}
+
+// ApplyEnvProtocol applies the Protocol from the environment on top of the
+// provided default value.
+// This function is subject to change or removal in future versions.
+func ApplyEnvProtocol(protocol Protocol) Protocol {
+	DefaultEnvOptionsReader.Apply(
+		envconfig.WithString("PROTOCOL", func(s string) {
+			protocol = stringToProtocol(s)
+		}),
+		envconfig.WithString("LOGS_PROTOCOL", func(s string) {
+			protocol = stringToProtocol(s)
+		}),
+	)
+
+	return protocol
+}
+
+// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
+func ApplyGRPCEnvConfigs(cfg Config) Config {
+	opts := getOptionsFromEnv()
+	for _, opt := range opts {
+		cfg = opt.ApplyGRPCOption(cfg)
+	}
+	return cfg
+}
+
+// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
+func ApplyHTTPEnvConfigs(cfg Config) Config {
+	opts := getOptionsFromEnv()
+	for _, opt := range opts {
+		cfg = opt.ApplyHTTPOption(cfg)
+	}
+	return cfg
+}
+
+func getOptionsFromEnv() []GenericOption {
+	opts := []GenericOption{}
+
+	tlsConf := &tls.Config{}
+	DefaultEnvOptionsReader.Apply(
+		envconfig.WithURL("ENDPOINT", func(u *url.URL) {
+			opts = append(opts, withEndpointScheme(u))
+			opts = append(opts, newSplitOption(func(cfg Config) Config {
+				cfg.Logs.Endpoint = u.Host
+				// For OTLP/HTTP endpoint URLs without a per-signal
+				// configuration, the passed endpoint is used as a base URL
+				// and the signals are sent to these paths relative to that.
+				cfg.Logs.URLPath = path.Join(u.Path, DefaultLogsPath)
+				return cfg
+			}, withEndpointForGRPC(u)))
+		}),
+		envconfig.WithURL("LOGS_ENDPOINT", func(u *url.URL) {
+			opts = append(opts, withEndpointScheme(u))
+			opts = append(opts, newSplitOption(func(cfg Config) Config {
+				cfg.Logs.Endpoint = u.Host
+				// For endpoint URLs for OTLP/HTTP per-signal variables, the
+				// URL MUST be used as-is without any modification. The only
+				// exception is that if a URL contains no path part, the root
+				// path / MUST be used.
+				path := u.Path
+				if path == "" {
+					path = "/"
+				}
+				cfg.Logs.URLPath = path
+				return cfg
+			}, withEndpointForGRPC(u)))
+		}),
+		envconfig.WithString("PROTOCOL", func(s string) {
+			opts = append(opts, withProtocol(s))
+		}),
+		envconfig.WithString("LOGS_PROTOCOL", func(s string) {
+			opts = append(opts, withProtocol(s))
+		}),
+		envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+		envconfig.WithCertPool("LOGS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+		envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+		envconfig.WithClientCert("LOGS_CLIENT_CERTIFICATE", "LOGS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+		withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
+		envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+		envconfig.WithBool("LOGS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+		envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+		envconfig.WithHeaders("LOGS_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+		WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+		WithEnvCompression("LOGS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+		envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+		envconfig.WithDuration("LOGS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+	)
+
+	return opts
+}
+
+func withEndpointScheme(u *url.URL) GenericOption {
+	switch strings.ToLower(u.Scheme) {
+	case "http", "unix":
+		return WithInsecure()
+	default:
+		return WithSecure()
+	}
+}
+
+func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
+	return func(cfg Config) Config {
+		// For OTLP/gRPC endpoints, this is the target to which the
+		// exporter is going to send telemetry.
+		cfg.Logs.Endpoint = path.Join(u.Host, u.Path)
+		return cfg
+	}
+}
+
+// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
+func WithEnvCompression(n string, fn func(Compression)) envconfig.ConfigFn {
+	return func(e *envconfig.EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			cp := NoCompression
+			if v == "gzip" {
+				cp = GzipCompression
+			}
+
+			fn(cp)
+		}
+	}
+}
+
+// revive:disable-next-line:flag-parameter
+func withInsecure(b bool) GenericOption {
+	if b {
+		return WithInsecure()
+	}
+	return WithSecure()
+}
+
+func withTLSConfig(c *tls.Config, fn func(*tls.Config)) envconfig.ConfigFn {
+	return func(e *envconfig.EnvOptionsReader) {
+		if c.RootCAs != nil || len(c.Certificates) > 0 {
+			fn(c)
+		}
+	}
+}
+
+func withProtocol(b string) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Logs.Protocol = stringToProtocol(b)
+		return cfg
+	})
+}
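+
+// Illustrative precedence sketch: the generic variable is applied first and
+// the signal-specific one second, so the LOGS_* value wins when both are set.
+// With this hypothetical environment:
+//
+//	OTEL_EXPORTER_OTLP_ENDPOINT=https://collector:4318
+//	OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=https://logs-collector:4318/custom/v1/logs
+//
+// NewHTTPConfig yields:
+//
+//	cfg := NewHTTPConfig()
+//	// cfg.Logs.Endpoint == "logs-collector:4318"
+//	// cfg.Logs.URLPath  == "/custom/v1/logs" (per-signal URLs are used as-is)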
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/options.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/options.go
new file mode 100644
index 0000000000000000000000000000000000000000..83112bba70175f5e8b8371db27db7df564dfee77
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/options.go
@@ -0,0 +1,348 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package otlpconfig
+
+import (
+	"crypto/tls"
+	"fmt"
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry"
+	"go.opentelemetry.io/otel"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/backoff"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/credentials/insecure"
+	"google.golang.org/grpc/encoding/gzip"
+	"path"
+	"strings"
+	"time"
+)
+
+const (
+	// DefaultLogsPath is a default URL path for endpoint that
+	// receives logs.
+	DefaultLogsPath string = "/v1/logs"
+	// DefaultTimeout is a default max waiting time for the backend to process
+	// each logs batch.
+	DefaultTimeout time.Duration = 10 * time.Second
+)
+
+type (
+	SignalConfig struct {
+		Endpoint    string
+		Protocol    Protocol
+		Insecure    bool
+		TLSCfg      *tls.Config
+		Headers     map[string]string
+		Compression Compression
+		Timeout     time.Duration
+		URLPath     string
+
+		// gRPC configurations
+		GRPCCredentials credentials.TransportCredentials
+	}
+
+	Config struct {
+		// Signal specific configurations
+		Logs SignalConfig
+
+		RetryConfig retry.Config
+
+		// gRPC configurations
+		ReconnectionPeriod time.Duration
+		ServiceConfig      string
+		DialOptions        []grpc.DialOption
+		GRPCConn           *grpc.ClientConn
+	}
+)
+
+// CleanPath returns a path with all spaces trimmed and all redundancies removed. If urlPath is empty or cleaning it results in an empty string, defaultPath is returned instead.
+func CleanPath(urlPath string, defaultPath string) string {
+	tmp := path.Clean(strings.TrimSpace(urlPath))
+	if tmp == "." {
+		return defaultPath
+	}
+	if !path.IsAbs(tmp) {
+		tmp = fmt.Sprintf("/%s", tmp)
+	}
+	return tmp
+}
+
+// NewHTTPConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default HTTP config values.
+func NewHTTPConfig(opts ...HTTPOption) Config {
+	cfg := Config{
+		Logs: SignalConfig{
+			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
+			URLPath:     DefaultLogsPath,
+			Compression: NoCompression,
+			Timeout:     DefaultTimeout,
+		},
+		RetryConfig: retry.DefaultConfig,
+	}
+	cfg = ApplyHTTPEnvConfigs(cfg)
+	for _, opt := range opts {
+		cfg = opt.ApplyHTTPOption(cfg)
+	}
+	cfg.Logs.URLPath = CleanPath(cfg.Logs.URLPath, DefaultLogsPath)
+	return cfg
+}
+
+func GetUserAgentHeader() string {
+	return "OTel OTLP Exporter Go/" + otel.Version()
+}
+
+// NewGRPCConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default gRPC config values.
+func NewGRPCConfig(opts ...GRPCOption) Config {
+	cfg := Config{
+		Logs: SignalConfig{
+			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
+			URLPath:     DefaultLogsPath,
+			Compression: NoCompression,
+			Timeout:     DefaultTimeout,
+		},
+		RetryConfig: retry.DefaultConfig,
+		DialOptions: []grpc.DialOption{grpc.WithUserAgent(GetUserAgentHeader())},
+	}
+	cfg = ApplyGRPCEnvConfigs(cfg)
+	for _, opt := range opts {
+		cfg = opt.ApplyGRPCOption(cfg)
+	}
+
+	if cfg.ServiceConfig != "" {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
+	}
+	// Prioritize GRPCCredentials over Insecure (passing both is an error).
+	if cfg.Logs.GRPCCredentials != nil {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Logs.GRPCCredentials))
+	} else if cfg.Logs.Insecure {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	} else {
+		// Default to using the host's root CA.
+		creds := credentials.NewTLS(nil)
+		cfg.Logs.GRPCCredentials = creds
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
+	}
+	if cfg.Logs.Compression == GzipCompression {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
+	}
+	if cfg.ReconnectionPeriod != 0 {
+		p := grpc.ConnectParams{
+			Backoff:           backoff.DefaultConfig,
+			MinConnectTimeout: cfg.ReconnectionPeriod,
+		}
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
+	}
+
+	return cfg
+}
+
+type (
+	// GenericOption applies an option to the HTTP or gRPC driver.
+	GenericOption interface {
+		ApplyHTTPOption(Config) Config
+		ApplyGRPCOption(Config) Config
+
+		// A private method to prevent users from implementing the
+		// interface, so future additions to it will not violate
+		// compatibility.
+		private()
+	}
+
+	// HTTPOption applies an option to the HTTP driver.
+	HTTPOption interface {
+		ApplyHTTPOption(Config) Config
+
+		// A private method to prevent users from implementing the
+		// interface, so future additions to it will not violate
+		// compatibility.
+		private()
+	}
+
+	// GRPCOption applies an option to the gRPC driver.
+	GRPCOption interface {
+		ApplyGRPCOption(Config) Config
+
+		// A private method to prevent users from implementing the
+		// interface, so future additions to it will not violate
+		// compatibility.
+		private()
+	}
+)
+
+// genericOption is an option that applies the same logic
+// for both gRPC and HTTP.
+type genericOption struct {
+	fn func(Config) Config
+}
+
+func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
+	return g.fn(cfg)
+}
+
+func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
+	return g.fn(cfg)
+}
+
+func (genericOption) private() {}
+
+func newGenericOption(fn func(cfg Config) Config) GenericOption {
+	return &genericOption{fn: fn}
+}
+
+// splitOption is an option that applies different logics
+// for gRPC and HTTP.
+type splitOption struct {
+	httpFn func(Config) Config
+	grpcFn func(Config) Config
+}
+
+func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
+	return g.grpcFn(cfg)
+}
+
+func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
+	return g.httpFn(cfg)
+}
+
+func (splitOption) private() {}
+
+func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
+	return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
+}
+
+// httpOption is an option that is only applied to the HTTP driver.
+type httpOption struct {
+	fn func(Config) Config
+}
+
+func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
+	return h.fn(cfg)
+}
+
+func (httpOption) private() {}
+
+func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
+	return &httpOption{fn: fn}
+}
+
+// grpcOption is an option that is only applied to the gRPC driver.
+type grpcOption struct {
+	fn func(Config) Config
+}
+
+func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
+	return h.fn(cfg)
+}
+
+func (grpcOption) private() {}
+
+func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
+	return &grpcOption{fn: fn}
+}
+
+// Generic Options
+
+func WithEndpoint(endpoint string) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Logs.Endpoint = endpoint
+		return cfg
+	})
+}
+
+func WithCompression(compression Compression) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Logs.Compression = compression
+		return cfg
+	})
+}
+
+func WithURLPath(urlPath string) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Logs.URLPath = urlPath
+		return cfg
+	})
+}
+
+func WithRetry(rc retry.Config) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.RetryConfig = rc
+		return cfg
+	})
+}
+
+func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
+	return newSplitOption(func(cfg Config) Config {
+		cfg.Logs.TLSCfg = tlsCfg.Clone()
+		return cfg
+	}, func(cfg Config) Config {
+		cfg.Logs.GRPCCredentials = credentials.NewTLS(tlsCfg)
+		return cfg
+	})
+}
+
+func WithInsecure() GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Logs.Insecure = true
+		return cfg
+	})
+}
+
+func WithSecure() GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Logs.Insecure = false
+		return cfg
+	})
+}
+
+func WithHeaders(headers map[string]string) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Logs.Headers = headers
+		return cfg
+	})
+}
+
+func WithTimeout(duration time.Duration) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Logs.Timeout = duration
+		return cfg
+	})
+}
+
+func WithProtocol(protocol Protocol) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Logs.Protocol = protocol
+		return cfg
+	})
+}
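+
+// Illustrative sketch: options compose on top of the defaults in argument
+// order, so later options override earlier ones. The endpoint value is
+// hypothetical.
+//
+//	cfg := NewHTTPConfig(
+//		WithEndpoint("collector.example:4318"),
+//		WithTimeout(5*time.Second),
+//	)
+//	// cfg.Logs.Timeout is 5s instead of the 10s default.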
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/optiontypes.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/optiontypes.go
new file mode 100644
index 0000000000000000000000000000000000000000..d361b03833a2d0b3ab8b3219472196b748734dcb
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/optiontypes.go
@@ -0,0 +1,58 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package otlpconfig
+
+const (
+	// DefaultCollectorGRPCPort is the default gRPC port of the collector.
+	DefaultCollectorGRPCPort uint16 = 4317
+	// DefaultCollectorHTTPPort is the default HTTP port of the collector.
+	DefaultCollectorHTTPPort uint16 = 4318
+	// DefaultCollectorHost is the host address the Exporter will attempt
+	// connect to if no collector address is provided.
+	DefaultCollectorHost string = "localhost"
+)
+
+type Protocol string
+
+const (
+	ExporterProtocolGrpc         Protocol = "grpc"
+	ExporterProtocolHttpProtobuf Protocol = "http/protobuf"
+	ExporterProtocolHttpJson     Protocol = "http/json"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression int
+
+const (
+	// NoCompression tells the driver to send payloads without
+	// compression.
+	NoCompression Compression = iota
+	// GzipCompression tells the driver to send payloads after
+	// compressing them with gzip.
+	GzipCompression
+)
+
+// Marshaler describes the kind of message format sent to the collector.
+type Marshaler int
+
+const (
+	// MarshalProto tells the driver to send using the protobuf binary format.
+	MarshalProto Marshaler = iota
+	// MarshalJSON tells the driver to send using json format.
+	MarshalJSON
+)
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/tls.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/tls.go
new file mode 100644
index 0000000000000000000000000000000000000000..88cf551edc603449d26d4913838ea4f9d64b2614
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/tls.go
@@ -0,0 +1,20 @@
+package otlpconfig
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+)
+
+// CreateTLSConfig creates a tls.Config from raw certificate bytes
+// to verify a server certificate.
+func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
+	cp := x509.NewCertPool()
+	if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+		return nil, errors.New("failed to append certificate to the cert pool")
+	}
+
+	return &tls.Config{
+		RootCAs: cp,
+	}, nil
+}
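+
+// Illustrative sketch, assuming pemBytes holds a PEM-encoded CA certificate
+// (read, for example, with os.ReadFile):
+//
+//	tlsCfg, err := CreateTLSConfig(pemBytes)
+//	if err != nil {
+//		// the bytes did not contain an appendable certificate
+//	}
+//	opt := WithTLSClientConfig(tlsCfg)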
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/partialsuccess.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/partialsuccess.go
new file mode 100644
index 0000000000000000000000000000000000000000..39a421963da2d264c91541e1a164bfb6c03f74fd
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/partialsuccess.go
@@ -0,0 +1,48 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import "fmt"
+
+// PartialSuccess represents the underlying error for all handling of
+// OTLP partial success messages.  Use `errors.Is(err,
+// PartialSuccess{})` to test whether an error passed to the OTel
+// error handler belongs to this category.
+type PartialSuccess struct {
+	ErrorMessage  string
+	RejectedItems int64
+	RejectedKind  string
+}
+
+// Error implements the error interface.
+func (ps PartialSuccess) Error() string {
+	msg := ps.ErrorMessage
+	if msg == "" {
+		msg = "empty message"
+	}
+	return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
+}
+
+var _ error = PartialSuccess{}
+
+// Is supports the errors.Is() interface.
+func (ps PartialSuccess) Is(err error) bool {
+	_, ok := err.(PartialSuccess)
+	return ok
+}
+
+// LogRecordPartialSuccessError returns an error describing a partial success
+// response for the log signal.
+func LogRecordPartialSuccessError(itemsRejected int64, errorMessage string) error {
+	return PartialSuccess{
+		ErrorMessage:  errorMessage,
+		RejectedItems: itemsRejected,
+		RejectedKind:  "logs",
+	}
+}
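+
+// Illustrative sketch: errors can be classified by category with errors.Is,
+// regardless of the field values (the count and message are hypothetical):
+//
+//	err := LogRecordPartialSuccessError(7, "buffer full")
+//	if errors.Is(err, PartialSuccess{}) {
+//		// 7 log records were rejected; the remainder were accepted.
+//	}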
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry/retry.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry/retry.go
new file mode 100644
index 0000000000000000000000000000000000000000..11c76e53c99645030ca54fdf93d4f94145880808
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry/retry.go
@@ -0,0 +1,152 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package retry
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/cenkalti/backoff/v4"
+)
+
+// DefaultConfig are the recommended defaults to use.
+var DefaultConfig = Config{
+	Enabled:         true,
+	InitialInterval: 5 * time.Second,
+	MaxInterval:     30 * time.Second,
+	MaxElapsedTime:  time.Minute,
+}
+
+// Config defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type Config struct {
+	// Enabled indicates whether failed batches should be retried; if
+	// false, a failed export is not retried.
+	Enabled bool
+	// InitialInterval the time to wait after the first failure before
+	// retrying.
+	InitialInterval time.Duration
+	// MaxInterval is the upper bound on backoff interval. Once this value is
+	// reached the delay between consecutive retries will always be
+	// `MaxInterval`.
+	MaxInterval time.Duration
+	// MaxElapsedTime is the maximum amount of time (including retries) spent
+	// trying to send a request/batch.  Once this value is reached, the data
+	// is discarded.
+	MaxElapsedTime time.Duration
+}
+
+// RequestFunc wraps a request with retry logic.
+type RequestFunc func(context.Context, func(context.Context) error) error
+
+// EvaluateFunc returns if an error is retry-able and if an explicit throttle
+// duration should be honored that was included in the error.
+//
+// The function must return true if the error argument is retry-able,
+// otherwise it must return false for the first return parameter.
+//
+// The function must return a non-zero time.Duration if the error contains
+// explicit throttle duration that should be honored, otherwise it must return
+// a zero valued time.Duration.
+type EvaluateFunc func(error) (bool, time.Duration)
+
+// RequestFunc returns a RequestFunc using the evaluate function to determine
+// if requests can be retried and based on the exponential backoff
+// configuration of c.
+func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
+	if !c.Enabled {
+		return func(ctx context.Context, fn func(context.Context) error) error {
+			return fn(ctx)
+		}
+	}
+
+	return func(ctx context.Context, fn func(context.Context) error) error {
+		// Do not use NewExponentialBackOff since it calls Reset and the code here
+		// must call Reset after changing the InitialInterval (this saves an
+		// unnecessary call to Now).
+		b := &backoff.ExponentialBackOff{
+			InitialInterval:     c.InitialInterval,
+			RandomizationFactor: backoff.DefaultRandomizationFactor,
+			Multiplier:          backoff.DefaultMultiplier,
+			MaxInterval:         c.MaxInterval,
+			MaxElapsedTime:      c.MaxElapsedTime,
+			Stop:                backoff.Stop,
+			Clock:               backoff.SystemClock,
+		}
+		b.Reset()
+
+		for {
+			err := fn(ctx)
+			if err == nil {
+				return nil
+			}
+
+			retryable, throttle := evaluate(err)
+			if !retryable {
+				return err
+			}
+
+			bOff := b.NextBackOff()
+			if bOff == backoff.Stop {
+				return fmt.Errorf("max retry time elapsed: %w", err)
+			}
+
+			// Wait for the greater of the backoff or throttle delay.
+			var delay time.Duration
+			if bOff > throttle {
+				delay = bOff
+			} else {
+				elapsed := b.GetElapsedTime()
+				if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
+					return fmt.Errorf("max retry time would elapse: %w", err)
+				}
+				delay = throttle
+			}
+
+			if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
+				return fmt.Errorf("%w: %s", ctxErr, err)
+			}
+		}
+	}
+}
+
+// Allow override for testing.
+var waitFunc = wait
+
+// wait takes the caller's context, and the amount of time to wait.  It will
+// return nil if the timer fires before or at the same time as the context's
+// deadline.  This indicates that the call can be retried.
+func wait(ctx context.Context, delay time.Duration) error {
+	timer := time.NewTimer(delay)
+	defer timer.Stop()
+
+	select {
+	case <-ctx.Done():
+		// Handle the case where the timer and context deadline end
+		// simultaneously by prioritizing the timer expiration nil value
+		// response.
+		select {
+		case <-timer.C:
+		default:
+			return ctx.Err()
+		}
+	case <-timer.C:
+	}
+
+	return nil
+}
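+
+// Illustrative sketch: wrapping an export call with the default policy. The
+// evaluate function here is a hypothetical stand-in; real clients supply
+// transport-specific logic such as the gRPC status check in otlplogsgrpc.
+//
+//	send := DefaultConfig.RequestFunc(func(err error) (bool, time.Duration) {
+//		return true, 0 // treat every error as retryable, no explicit throttle
+//	})
+//	err := send(ctx, func(ctx context.Context) error {
+//		return doExport(ctx) // hypothetical export call
+//	})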
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/options.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/options.go
new file mode 100644
index 0000000000000000000000000000000000000000..1ce4df5275b7ff3f8eb35c40df5a68c62ee597fc
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/options.go
@@ -0,0 +1,67 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package otlplogs
+
+import (
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig"
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc"
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp"
+)
+
+type ExporterConfig struct {
+	client Client
+}
+
+type ExporterOption interface {
+	apply(ExporterConfig) ExporterConfig
+}
+
+type exporterOptionFunc func(ExporterConfig) ExporterConfig
+
+func (fn exporterOptionFunc) apply(config ExporterConfig) ExporterConfig {
+	return fn(config)
+}
+
+// NewExporterConfig creates a new configuration for the exporter.
+func NewExporterConfig(options ...ExporterOption) ExporterConfig {
+
+	config := ExporterConfig{}
+
+	for _, option := range options {
+		config = option.apply(config)
+	}
+
+	if config.client == nil {
+		// Default is http/protobuf client
+		protocol := otlpconfig.ApplyEnvProtocol(otlpconfig.ExporterProtocolHttpProtobuf)
+
+		if protocol == otlpconfig.ExporterProtocolGrpc {
+			config.client = otlplogsgrpc.NewClient()
+		} else {
+			config.client = otlplogshttp.NewClient()
+		}
+	}
+
+	return config
+}
+
+func WithClient(client Client) ExporterOption {
+	return exporterOptionFunc(func(cfg ExporterConfig) ExporterConfig {
+		cfg.client = client
+		return cfg
+	})
+}
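+
+// Illustrative sketch: with no explicit client, the protocol is resolved from
+// OTEL_EXPORTER_OTLP(_LOGS)_PROTOCOL and defaults to http/protobuf; a gRPC
+// client can be forced explicitly:
+//
+//	cfg := NewExporterConfig(WithClient(otlplogsgrpc.NewClient()))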
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc/client.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc/client.go
new file mode 100644
index 0000000000000000000000000000000000000000..bd4b19cd87fa662768059df0819505320a300f40
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc/client.go
@@ -0,0 +1,292 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package otlplogsgrpc
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"time"
+
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal"
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig"
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry"
+
+	"go.opentelemetry.io/otel"
+	collogspb "go.opentelemetry.io/proto/otlp/collector/logs/v1"
+	logspb "go.opentelemetry.io/proto/otlp/logs/v1"
+
+	"google.golang.org/genproto/googleapis/rpc/errdetails"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+type grpcClient struct {
+	endpoint      string
+	dialOpts      []grpc.DialOption
+	metadata      metadata.MD
+	exportTimeout time.Duration
+	requestFunc   retry.RequestFunc
+
+	// stopCtx is used as a parent context for all exports. Therefore, when it
+	// is canceled with the stopFunc all exports are canceled.
+	stopCtx context.Context
+	// stopFunc cancels stopCtx, stopping any active exports.
+	stopFunc context.CancelFunc
+
+	// ourConn keeps track of where conn was created: true if created here on
+	// Start, or false if passed with an option. This is important on Shutdown
+	// as the conn should only be closed if created here on start. Otherwise,
+	// it is up to the processes that passed the conn to close it.
+	ourConn bool
+	conn    *grpc.ClientConn
+	tscMu   sync.RWMutex
+	tsc     collogspb.LogsServiceClient
+}
+
+func NewClient(opts ...Option) *grpcClient {
+	cfg := otlpconfig.NewGRPCConfig(asGRPCOptions(opts)...)
+
+	ctx, cancel := context.WithCancel(context.Background())
+
+	c := &grpcClient{
+		endpoint:      cfg.Logs.Endpoint,
+		exportTimeout: cfg.Logs.Timeout,
+		requestFunc:   cfg.RetryConfig.RequestFunc(retryable),
+		dialOpts:      cfg.DialOptions,
+		stopCtx:       ctx,
+		stopFunc:      cancel,
+		conn:          cfg.GRPCConn,
+	}
+
+	if len(cfg.Logs.Headers) > 0 {
+		c.metadata = metadata.New(cfg.Logs.Headers)
+	}
+
+	return c
+}
+
+// Start establishes a gRPC connection to the collector.
+func (c *grpcClient) Start(ctx context.Context) error {
+	if c.conn == nil {
+		// If the caller did not provide a ClientConn when the grpcClient was
+		// created, create one using the configuration they did provide.
+		conn, err := grpc.DialContext(ctx, c.endpoint, c.dialOpts...)
+		if err != nil {
+			return err
+		}
+		// Keep track that we own the lifecycle of this conn and need to close
+		// it on Shutdown.
+		c.ourConn = true
+		c.conn = conn
+	}
+
+	// The otlplogs.Client interface states this method is called just once,
+	// so no need to check if already started.
+	c.tscMu.Lock()
+	c.tsc = collogspb.NewLogsServiceClient(c.conn)
+	c.tscMu.Unlock()
+
+	return nil
+}
+
+var errAlreadyStopped = errors.New("the grpcClient is already stopped")
+
+// Stop shuts down the grpcClient.
+//
+// Any active connections to a remote endpoint are closed if they were created
+// by the grpcClient. Any gRPC connection passed during creation using
+// WithGRPCConn will not be closed. It is the caller's responsibility to
+// handle cleanup of that resource.
+//
+// This method synchronizes with the UploadLogs method of the grpcClient. It
+// will wait for any active calls to that method to complete unimpeded, or it
+// will cancel any active calls if ctx expires. If ctx expires, the context
+// error will be forwarded as the returned error. All grpcClient held resources
+// will still be released in this situation.
+//
+// If the grpcClient has already stopped, an error will be returned describing
+// this.
+func (c *grpcClient) Stop(ctx context.Context) error {
+	// Make sure to return context error if the context is done when calling this method.
+	err := ctx.Err()
+
+	// Acquire the c.tscMu lock within the ctx lifetime.
+	acquired := make(chan struct{})
+	go func() {
+		c.tscMu.Lock()
+		close(acquired)
+	}()
+
+	select {
+	case <-ctx.Done():
+		// The Stop timeout is reached. Kill any remaining exports to force
+		// the clear of the lock and save the timeout error to return and
+		// signal the shutdown timed out before cleanly stopping.
+		c.stopFunc()
+		err = ctx.Err()
+
+		// To ensure the grpcClient is not left in a dirty state c.tsc needs to be
+		// set to nil. To avoid the race condition when doing this, ensure
+		// that all the exports are killed (initiated by c.stopFunc).
+		<-acquired
+	case <-acquired:
+	}
+	// Hold the tscMu lock for the rest of the function to ensure no new
+	// exports are started.
+	defer c.tscMu.Unlock()
+
+	// The otlplogs.Client interface states this method is called only
+	// once, but there is no guarantee it is called after Start. Ensure the
+	// grpcClient is started before doing anything and let the caller know if they
+	// made a mistake.
+	if c.tsc == nil {
+		return errAlreadyStopped
+	}
+
+	// Clear c.tsc to signal the grpcClient is stopped.
+	c.tsc = nil
+
+	if c.ourConn {
+		closeErr := c.conn.Close()
+		// A context timeout error takes precedence over this error.
+		if err == nil && closeErr != nil {
+			err = closeErr
+		}
+	}
+	return err
+}
+
+var errShutdown = errors.New("the grpcClient is shutdown")
+
+// UploadLogs sends log records.
+//
+// Retryable errors from the server will be handled according to any
+// RetryConfig the grpcClient was created with.
+func (c *grpcClient) UploadLogs(ctx context.Context, protoLogs []*logspb.ResourceLogs) error {
+	// Hold a read lock to ensure a shut down initiated after this starts does
+	// not abandon the export. This read lock acquire has less priority than a
+	// write lock acquire (i.e. Stop), meaning if the grpcClient is shutting down
+	// this will come after the shut down.
+	c.tscMu.RLock()
+	defer c.tscMu.RUnlock()
+
+	if c.tsc == nil {
+		return errShutdown
+	}
+
+	ctx, cancel := c.exportContext(ctx)
+	defer cancel()
+
+	return c.requestFunc(ctx, func(iCtx context.Context) error {
+		resp, err := c.tsc.Export(iCtx, &collogspb.ExportLogsServiceRequest{
+			ResourceLogs: protoLogs,
+		})
+		if resp != nil && resp.PartialSuccess != nil {
+			msg := resp.PartialSuccess.GetErrorMessage()
+			n := resp.PartialSuccess.GetRejectedLogRecords()
+			if n != 0 || msg != "" {
+				err := internal.LogRecordPartialSuccessError(n, msg)
+				otel.Handle(err)
+			}
+		}
+		// nil is converted to OK.
+		if status.Code(err) == codes.OK {
+			// Success.
+			return nil
+		}
+		return err
+	})
+}
+
+// exportContext returns a copy of parent with an appropriate deadline and
+// cancellation function.
+//
+// It is the caller's responsibility to cancel the returned context once its
+// use is complete, via the parent or directly with the returned CancelFunc, to
+// ensure all resources are correctly released.
+func (c *grpcClient) exportContext(parent context.Context) (context.Context, context.CancelFunc) {
+	var (
+		ctx    context.Context
+		cancel context.CancelFunc
+	)
+
+	if c.exportTimeout > 0 {
+		ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
+	} else {
+		ctx, cancel = context.WithCancel(parent)
+	}
+
+	if c.metadata.Len() > 0 {
+		ctx = metadata.NewOutgoingContext(ctx, c.metadata)
+	}
+
+	// Unify the grpcClient stopCtx with the parent.
+	go func() {
+		select {
+		case <-ctx.Done():
+		case <-c.stopCtx.Done():
+			// Cancel the export as the shutdown has timed out.
+			cancel()
+		}
+	}()
+
+	return ctx, cancel
+}
+
+// retryable returns if err identifies a request that can be retried and a
+// duration to wait for if an explicit throttle time is included in err.
+func retryable(err error) (bool, time.Duration) {
+	s := status.Convert(err)
+	switch s.Code() {
+	case codes.Canceled,
+		codes.DeadlineExceeded,
+		codes.ResourceExhausted,
+		codes.Aborted,
+		codes.OutOfRange,
+		codes.Unavailable,
+		codes.DataLoss:
+		return true, throttleDelay(s)
+	}
+
+	// Not a retry-able error.
+	return false, 0
+}
+
+// throttleDelay returns a duration to wait for if an explicit throttle time
+// is included in the response status.
+func throttleDelay(s *status.Status) time.Duration {
+	for _, detail := range s.Details() {
+		if t, ok := detail.(*errdetails.RetryInfo); ok {
+			return t.RetryDelay.AsDuration()
+		}
+	}
+	return 0
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Client.
+func (c *grpcClient) MarshalLog() interface{} {
+	return struct {
+		Type     string
+		Endpoint string
+	}{
+		Type:     "otlphttpgrpc",
+		Endpoint: c.endpoint,
+	}
+}
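+
+// Illustrative lifecycle sketch (the endpoint is hypothetical):
+//
+//	client := NewClient(WithEndpoint("collector.example:4317"), WithInsecure())
+//	if err := client.Start(ctx); err != nil {
+//		// handle connection setup failure
+//	}
+//	defer client.Stop(ctx)
+//	err := client.UploadLogs(ctx, protoLogs)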
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc/options.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc/options.go
new file mode 100644
index 0000000000000000000000000000000000000000..a049ad1f5e7587cf4d88deb01294b51cdb0a3826
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc/options.go
@@ -0,0 +1,191 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package otlplogsgrpc
+
+import (
+	"fmt"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig"
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry"
+	"go.opentelemetry.io/otel"
+)
+
+// Option applies an option to the gRPC driver.
+type Option interface {
+	applyGRPCOption(otlpconfig.Config) otlpconfig.Config
+}
+
+func asGRPCOptions(opts []Option) []otlpconfig.GRPCOption {
+	converted := make([]otlpconfig.GRPCOption, len(opts))
+	for i, o := range opts {
+		converted[i] = otlpconfig.NewGRPCOption(o.applyGRPCOption)
+	}
+	return converted
+}
+
+// RetryConfig defines configuration for retrying export of logs batches that
+// failed to be received by the target endpoint.
+//
+// This configuration does not define any network retry strategy. That is
+// entirely handled by the gRPC ClientConn.
+type RetryConfig retry.Config
+
+type wrappedOption struct {
+	otlpconfig.GRPCOption
+}
+
+func (w wrappedOption) applyGRPCOption(cfg otlpconfig.Config) otlpconfig.Config {
+	return w.ApplyGRPCOption(cfg)
+}
+
+// WithInsecure disables client transport security for the exporter's gRPC
+// connection just like grpc.WithInsecure()
+// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. Note, by
+// default, client security is required unless WithInsecure is used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithInsecure() Option {
+	return wrappedOption{otlpconfig.WithInsecure()}
+}
+
+// WithEndpoint sets the target endpoint the exporter will connect to. If
+// unset, localhost:4317 will be used as a default.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpoint(endpoint string) Option {
+	return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
+}
+
+// WithReconnectionPeriod sets the minimum amount of time between connection
+// attempts to the target endpoint.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithReconnectionPeriod(rp time.Duration) Option {
+	return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+		cfg.ReconnectionPeriod = rp
+		return cfg
+	})}
+}
+
+func compressorToCompression(compressor string) otlpconfig.Compression {
+	if compressor == "gzip" {
+		return otlpconfig.GzipCompression
+	}
+
+	otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
+	return otlpconfig.NoCompression
+}
+
+// WithCompressor sets the compressor for the gRPC client to use when sending
+// requests. It is the responsibility of the caller to ensure that the
+// compressor set has been registered with google.golang.org/grpc/encoding.
+// This can be done by encoding.RegisterCompressor. Some compressors
+// auto-register on import, such as gzip, which can be registered by calling
+// `import _ "google.golang.org/grpc/encoding/gzip"`.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithCompressor(compressor string) Option {
+	return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))}
+}
+
+// WithHeaders will send the provided headers with each gRPC request.
+func WithHeaders(headers map[string]string) Option {
+	return wrappedOption{otlpconfig.WithHeaders(headers)}
+}
+
+// WithTLSCredentials allows the connection to use TLS credentials when
+// talking to the server. It takes in grpc.TransportCredentials instead of say
+// a Certificate file or a tls.Certificate, because the retrieving of these
+// credentials can be done in many ways e.g. plain file, in code tls.Config or
+// by certificate rotation, so it is up to the caller to decide what to use.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithTLSCredentials(creds credentials.TransportCredentials) Option {
+	return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+		cfg.Logs.GRPCCredentials = creds
+		return cfg
+	})}
+}
+
+// WithServiceConfig defines the default gRPC service config used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithServiceConfig(serviceConfig string) Option {
+	return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+		cfg.ServiceConfig = serviceConfig
+		return cfg
+	})}
+}
+
+// WithDialOption sets explicit grpc.DialOptions to use when making a
+// connection. The options here are appended to the internal grpc.DialOptions
+// used so they will take precedence over any other internal grpc.DialOptions
+// they might conflict with.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithDialOption(opts ...grpc.DialOption) Option {
+	return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+		cfg.DialOptions = opts
+		return cfg
+	})}
+}
+
+// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
+//
+// This option takes precedence over any other option that relates to
+// establishing or persisting a gRPC connection to a target endpoint. Any
+// other option of those types passed will be ignored.
+//
+// It is the caller's responsibility to close the passed conn. The grpcClient
+// Shutdown method will not close this connection.
+func WithGRPCConn(conn *grpc.ClientConn) Option {
+	return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+		cfg.GRPCConn = conn
+		return cfg
+	})}
+}
+
+// WithTimeout sets the max amount of time the client will attempt to export a
+// batch of logs. This takes precedence over any retry settings defined with
+// WithRetry, once this time limit has been reached the export is abandoned
+// and the batch of logs is dropped.
+//
+// If unset, the default timeout will be set to 10 seconds.
+func WithTimeout(duration time.Duration) Option {
+	return wrappedOption{otlpconfig.WithTimeout(duration)}
+}
+
+// WithRetry sets the retry policy for transient retryable errors that may be
+// returned by the target endpoint when exporting a batch of logs.
+//
+// If the target endpoint responds with not only a retryable error but also
+// an explicit backoff time in the response, that time will take precedence
+// over these settings.
+//
+// These settings do not define any network retry strategy. That is entirely
+// handled by the gRPC ClientConn.
+//
+// If unset, the default retry policy will be used. It will retry the export
+// 5 seconds after receiving a retryable error and increase the delay
+// exponentially after each error, for no more than a total time of 1 minute.
+func WithRetry(settings RetryConfig) Option {
+	return wrappedOption{otlpconfig.WithRetry(retry.Config(settings))}
+}
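+
+// A minimal sketch of how these options might be combined when constructing
+// the gRPC logs client. The NewClient constructor name and the header value
+// are illustrative assumptions, not definitions from this file:
+//
+//	client := NewClient(
+//		WithHeaders(map[string]string{"x-tenant": "demo"}),
+//		WithCompressor("gzip"), // gzip must be registered with grpc/encoding
+//		WithTimeout(15*time.Second),
+//	)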
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp/client.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp/client.go
new file mode 100644
index 0000000000000000000000000000000000000000..c25d0a0514c5729cfa897700348ef581e5ccfbbc
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp/client.go
@@ -0,0 +1,371 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package otlplogshttp
+
+import (
+	"bytes"
+	"compress/gzip"
+	"context"
+	"fmt"
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal"
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig"
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry"
+	"go.opentelemetry.io/otel"
+	collogspb "go.opentelemetry.io/proto/otlp/collector/logs/v1"
+	logspb "go.opentelemetry.io/proto/otlp/logs/v1"
+	"google.golang.org/protobuf/encoding/protojson"
+	"google.golang.org/protobuf/proto"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"strconv"
+	"sync"
+	"time"
+)
+
+const contentTypeProto = "application/x-protobuf"
+const contentTypeJson = "application/json"
+
+var gzPool = sync.Pool{
+	New: func() interface{} {
+		w := gzip.NewWriter(io.Discard)
+		return w
+	},
+}
+
+// Keep it in sync with golang's DefaultTransport from net/http! We
+// have our own copy to avoid handling a situation where the
+// DefaultTransport is overwritten with some different implementation
+// of http.RoundTripper or is modified by another package.
+var ourTransport = &http.Transport{
+	Proxy: http.ProxyFromEnvironment,
+	DialContext: (&net.Dialer{
+		Timeout:   30 * time.Second,
+		KeepAlive: 30 * time.Second,
+	}).DialContext,
+	ForceAttemptHTTP2:     true,
+	MaxIdleConns:          100,
+	IdleConnTimeout:       90 * time.Second,
+	TLSHandshakeTimeout:   10 * time.Second,
+	ExpectContinueTimeout: 1 * time.Second,
+}
+
+type httpClient struct {
+	name        string
+	cfg         otlpconfig.SignalConfig
+	generalCfg  otlpconfig.Config
+	requestFunc retry.RequestFunc
+	client      *http.Client
+	stopCh      chan struct{}
+	stopOnce    sync.Once
+}
+
+// NewClient creates a new HTTP logs client.
+func NewClient(opts ...Option) *httpClient {
+
+	cfg := otlpconfig.NewHTTPConfig(asHTTPOptions(opts)...)
+
+	// Fall back to the default protocol if an invalid one was provided
+	if cfg.Logs.Protocol != otlpconfig.ExporterProtocolHttpJson && cfg.Logs.Protocol != otlpconfig.ExporterProtocolHttpProtobuf {
+		cfg.Logs.Protocol = otlpconfig.ExporterProtocolHttpProtobuf
+	}
+
+	client := &http.Client{
+		Transport: ourTransport,
+		Timeout:   cfg.Logs.Timeout,
+	}
+	if cfg.Logs.TLSCfg != nil {
+		transport := ourTransport.Clone()
+		transport.TLSClientConfig = cfg.Logs.TLSCfg
+		client.Transport = transport
+	}
+
+	stopCh := make(chan struct{})
+	return &httpClient{
+		name:        "logs",
+		cfg:         cfg.Logs,
+		generalCfg:  cfg,
+		requestFunc: cfg.RetryConfig.RequestFunc(evaluate),
+		stopCh:      stopCh,
+		client:      client,
+	}
+}
+
+// Start does nothing in an HTTP client.
+func (d *httpClient) Start(ctx context.Context) error {
+	// nothing to do
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+	return nil
+}
+
+// Stop shuts down the client and interrupts any in-flight requests.
+func (d *httpClient) Stop(ctx context.Context) error {
+	d.stopOnce.Do(func() {
+		close(d.stopCh)
+	})
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+	return nil
+}
+
+// retryableError represents a request failure that can be retried.
+type retryableError struct {
+	throttle int64
+}
+
+// evaluate returns whether err is retryable. If it is, and it includes an
+// explicit throttling delay, that delay is also returned.
+func evaluate(err error) (bool, time.Duration) {
+	if err == nil {
+		return false, 0
+	}
+
+	rErr, ok := err.(retryableError)
+	if !ok {
+		return false, 0
+	}
+
+	// throttle is parsed from the Retry-After header, which is specified in seconds.
+	return true, time.Duration(rErr.throttle) * time.Second
+}
+
+func (d *httpClient) contextWithStop(ctx context.Context) (context.Context, context.CancelFunc) {
+	// Unify the parent context Done signal with the httpClient's stop
+	// channel.
+	ctx, cancel := context.WithCancel(ctx)
+	go func(ctx context.Context, cancel context.CancelFunc) {
+		select {
+		case <-ctx.Done():
+			// Nothing to do, either cancelled or deadline
+			// happened.
+		case <-d.stopCh:
+			cancel()
+		}
+	}(ctx, cancel)
+	return ctx, cancel
+}
+
+func (d *httpClient) newRequest(body []byte) (request, error) {
+	u := url.URL{Scheme: d.getScheme(), Host: d.cfg.Endpoint, Path: d.cfg.URLPath}
+	r, err := http.NewRequest(http.MethodPost, u.String(), nil)
+	if err != nil {
+		return request{Request: r}, err
+	}
+
+	r.Header.Set("User-Agent", otlpconfig.GetUserAgentHeader())
+
+	for k, v := range d.cfg.Headers {
+		r.Header.Set(k, v)
+	}
+	switch d.cfg.Protocol {
+	case otlpconfig.ExporterProtocolHttpJson:
+		r.Header.Set("Content-Type", contentTypeJson)
+	default:
+		r.Header.Set("Content-Type", contentTypeProto)
+	}
+
+	req := request{Request: r}
+	switch Compression(d.cfg.Compression) {
+	case NoCompression:
+		r.ContentLength = (int64)(len(body))
+		req.bodyReader = bodyReader(body)
+	case GzipCompression:
+		// Ensure the content length is not used.
+		r.ContentLength = -1
+		r.Header.Set("Content-Encoding", "gzip")
+
+		gz := gzPool.Get().(*gzip.Writer)
+		defer gzPool.Put(gz)
+
+		var b bytes.Buffer
+		gz.Reset(&b)
+
+		if _, err := gz.Write(body); err != nil {
+			return req, err
+		}
+		// Close needs to be called to ensure the body is fully written.
+		if err := gz.Close(); err != nil {
+			return req, err
+		}
+
+		req.bodyReader = bodyReader(b.Bytes())
+	}
+
+	return req, nil
+}
+
+// bodyReader returns a closure returning a new reader for buf.
+func bodyReader(buf []byte) func() io.ReadCloser {
+	return func() io.ReadCloser {
+		return io.NopCloser(bytes.NewReader(buf))
+	}
+}
+
+func (d *httpClient) getScheme() string {
+	if d.cfg.Insecure {
+		return "http"
+	}
+	return "https"
+}
+
+// request wraps an http.Request with a resettable body reader.
+type request struct {
+	*http.Request
+
+	// bodyReader allows the same body to be used for multiple requests.
+	bodyReader func() io.ReadCloser
+}
+
+// reset reinitializes the request Body and uses ctx for the request.
+func (r *request) reset(ctx context.Context) {
+	r.Body = r.bodyReader()
+	r.Request = r.Request.WithContext(ctx)
+}
+
+// newResponseError returns a retryableError and will extract any explicit
+// throttle delay contained in headers.
+func newResponseError(header http.Header) error {
+	var rErr retryableError
+	if s, ok := header["Retry-After"]; ok {
+		if t, err := strconv.ParseInt(s[0], 10, 64); err == nil {
+			rErr.throttle = t
+		}
+	}
+	return rErr
+}
+
+func (e retryableError) Error() string {
+	return "retry-able request failure"
+}
+
+func (d *httpClient) UploadLogs(ctx context.Context, protoLogs []*logspb.ResourceLogs) error {
+
+	// Build the OTLP export request for the logs
+	exportLogs := &collogspb.ExportLogsServiceRequest{
+		ResourceLogs: protoLogs,
+	}
+
+	// Serialize the OTLP logs payload
+	var rawRequest []byte
+	var err error
+	switch d.cfg.Protocol {
+	case otlpconfig.ExporterProtocolHttpJson:
+		rawRequest, err = protojson.MarshalOptions{
+			UseProtoNames: false,
+		}.Marshal(exportLogs)
+	default:
+		rawRequest, err = proto.Marshal(exportLogs)
+	}
+	// Do not silently drop marshaling failures; report them to the caller.
+	if err != nil {
+		return err
+	}
+
+	ctx, cancel := d.contextWithStop(ctx)
+	defer cancel()
+
+	request, err := d.newRequest(rawRequest)
+	if err != nil {
+		return err
+	}
+
+	return d.requestFunc(ctx, func(ctx context.Context) error {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		request.reset(ctx)
+		resp, err := d.client.Do(request.Request)
+		if err != nil {
+			return err
+		}
+
+		if resp != nil && resp.Body != nil {
+			defer func() {
+				if err := resp.Body.Close(); err != nil {
+					otel.Handle(err)
+				}
+			}()
+		}
+
+		switch sc := resp.StatusCode; {
+		case sc >= 200 && sc <= 299:
+			// Success, do not retry.
+			// Read the partial success message, if any.
+			var respData bytes.Buffer
+			if _, err := io.Copy(&respData, resp.Body); err != nil {
+				return err
+			}
+
+			if respData.Len() != 0 {
+				var respProto collogspb.ExportLogsServiceResponse
+				switch d.cfg.Protocol {
+				case otlpconfig.ExporterProtocolHttpJson:
+					if err := protojson.Unmarshal(respData.Bytes(), &respProto); err != nil {
+						return err
+					}
+				default:
+					if err := proto.Unmarshal(respData.Bytes(), &respProto); err != nil {
+						return err
+					}
+				}
+
+				// TODO: partialsuccess can't be handled properly by OTEL as current otlp.internal.PartialSuccess is custom
+				// need to have that interface in official OTEL otlp.internal package
+				if respProto.PartialSuccess != nil {
+					msg := respProto.PartialSuccess.GetErrorMessage()
+					n := respProto.PartialSuccess.GetRejectedLogRecords()
+					if n != 0 || msg != "" {
+						err := internal.LogRecordPartialSuccessError(n, msg)
+						otel.Handle(err)
+					}
+				}
+			}
+			return nil
+		case sc == http.StatusTooManyRequests, sc == http.StatusServiceUnavailable:
+			// Retry-able failures.  Drain the body to reuse the connection.
+			if _, err := io.Copy(io.Discard, resp.Body); err != nil {
+				otel.Handle(err)
+			}
+			return newResponseError(resp.Header)
+		default:
+			buffer := make([]byte, 4096)
+			n, _ := resp.Body.Read(buffer)
+			if n == 0 {
+				return fmt.Errorf("failed to send to %s: %s", request.URL, resp.Status)
+			}
+			return fmt.Errorf("failed to send to %s: %s\n%s", request.URL, resp.Status, buffer[:n])
+		}
+	})
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Client.
+func (d *httpClient) MarshalLog() interface{} {
+	return struct {
+		Type     string
+		Endpoint string
+		Insecure bool
+	}{
+		Type:     string(d.cfg.Protocol),
+		Endpoint: d.cfg.Endpoint,
+		Insecure: d.cfg.Insecure,
+	}
+}
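+
+// A hedged usage sketch of this client. It is normally driven by the otlplogs
+// exporter rather than called directly; the empty ResourceLogs slice here is
+// only illustrative:
+//
+//	c := NewClient(WithEndpoint("localhost:4318"), WithProtobufProtocol())
+//	if err := c.Start(context.Background()); err != nil {
+//		// handle start error
+//	}
+//	_ = c.UploadLogs(context.Background(), []*logspb.ResourceLogs{})
+//	_ = c.Stop(context.Background())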
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp/options.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp/options.go
new file mode 100644
index 0000000000000000000000000000000000000000..7d0e4da006377b5c384aa642c6fc113aba5c1077
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp/options.go
@@ -0,0 +1,127 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package otlplogshttp
+
+import (
+	"crypto/tls"
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig"
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry"
+	"time"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression otlpconfig.Compression
+
+const (
+	// NoCompression tells the driver to send payloads without
+	// compression.
+	NoCompression = Compression(otlpconfig.NoCompression)
+	// GzipCompression tells the driver to send payloads after
+	// compressing them with gzip.
+	GzipCompression = Compression(otlpconfig.GzipCompression)
+)
+
+// Option applies an option to the HTTP httpClient.
+type Option interface {
+	applyHTTPOption(otlpconfig.Config) otlpconfig.Config
+}
+
+func asHTTPOptions(opts []Option) []otlpconfig.HTTPOption {
+	converted := make([]otlpconfig.HTTPOption, len(opts))
+	for i, o := range opts {
+		converted[i] = otlpconfig.NewHTTPOption(o.applyHTTPOption)
+	}
+	return converted
+}
+
+// RetryConfig defines configuration for retrying batches in case of export
+// failure using an exponential backoff.
+type RetryConfig retry.Config
+
+type wrappedOption struct {
+	otlpconfig.HTTPOption
+}
+
+func (w wrappedOption) applyHTTPOption(cfg otlpconfig.Config) otlpconfig.Config {
+	return w.ApplyHTTPOption(cfg)
+}
+
+// WithEndpoint allows one to set the address of the collector endpoint that
+// the driver will use to send logs. If unset, the default endpoint
+// (localhost:4318) will be used. Note that the endpoint must not contain any
+// URL path.
+func WithEndpoint(endpoint string) Option {
+	return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
+}
+
+// WithJsonProtocol applies the http/json protocol to the HTTP client.
+func WithJsonProtocol() Option {
+	return wrappedOption{otlpconfig.WithProtocol(otlpconfig.ExporterProtocolHttpJson)}
+}
+
+// WithProtobufProtocol applies the http/protobuf protocol to the HTTP client.
+func WithProtobufProtocol() Option {
+	return wrappedOption{otlpconfig.WithProtocol(otlpconfig.ExporterProtocolHttpProtobuf)}
+}
+
+// WithCompression tells the driver to compress the sent data.
+func WithCompression(compression Compression) Option {
+	return wrappedOption{otlpconfig.WithCompression(otlpconfig.Compression(compression))}
+}
+
+// WithURLPath allows one to override the default URL path used
+// for sending logs. If unset, default ("/v1/logs") will be used.
+func WithURLPath(urlPath string) Option {
+	return wrappedOption{otlpconfig.WithURLPath(urlPath)}
+}
+
+// WithTLSClientConfig can be used to set up a custom TLS
+// configuration for the httpClient used to send payloads to the
+// collector. Use it if you want to use a custom certificate.
+func WithTLSClientConfig(tlsCfg *tls.Config) Option {
+	return wrappedOption{otlpconfig.WithTLSClientConfig(tlsCfg)}
+}
+
+// WithInsecure tells the driver to connect to the collector using the
+// HTTP scheme, instead of HTTPS.
+func WithInsecure() Option {
+	return wrappedOption{otlpconfig.WithInsecure()}
+}
+
+// WithHeaders allows one to tell the driver to send additional HTTP
+// headers with the payloads. Specifying headers like Content-Length,
+// Content-Encoding and Content-Type may result in a broken driver.
+func WithHeaders(headers map[string]string) Option {
+	return wrappedOption{otlpconfig.WithHeaders(headers)}
+}
+
+// WithTimeout tells the driver the max waiting time for the backend to process
+// each logs batch.  If unset, the default will be 10 seconds.
+func WithTimeout(duration time.Duration) Option {
+	return wrappedOption{otlpconfig.WithTimeout(duration)}
+}
+
+// WithRetry configures the retry policy for transient errors that may occur
+// when exporting logs. An exponential back-off algorithm is used to ensure
+// endpoints are not overwhelmed with retries. If unset, the default retry
+// policy will retry after 5 seconds and increase exponentially after each
+// error for a total of 1 minute.
+func WithRetry(rc RetryConfig) Option {
+	return wrappedOption{otlpconfig.WithRetry(retry.Config(rc))}
+}
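+
+// For illustration, a client talking to a local collector over plain HTTP
+// with gzip-compressed JSON payloads might combine the options above as
+// follows (the endpoint and path values are assumptions):
+//
+//	c := NewClient(
+//		WithEndpoint("localhost:4318"),
+//		WithInsecure(),
+//		WithJsonProtocol(),
+//		WithCompression(GzipCompression),
+//		WithURLPath("/v1/logs"),
+//	)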
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/internal_logging.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/internal_logging.go
new file mode 100644
index 0000000000000000000000000000000000000000..801f40f2073366c3ec61c00125e9ea97d0093c7a
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/internal_logging.go
@@ -0,0 +1,70 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "github.com/agoda-com/opentelemetry-logs-go/internal/global"
+
+import (
+	"log"
+	"os"
+	"sync/atomic"
+	"unsafe"
+
+	"github.com/go-logr/logr"
+	"github.com/go-logr/stdr"
+)
+
+// globalLogger is the logging interface used within the otel api and sdk to provide details of the internals.
+//
+// The default logger uses stdr which is backed by the standard `log.Logger`
+// interface. This logger will only show messages at the Error Level.
+var globalLogger unsafe.Pointer
+
+func init() {
+	SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
+}
+
+// SetLogger overrides the globalLogger with l.
+//
+// To see Warn messages use a logger with `l.V(1).Enabled() == true`.
+// To see Info messages use a logger with `l.V(4).Enabled() == true`.
+// To see Debug messages use a logger with `l.V(8).Enabled() == true`.
+func SetLogger(l logr.Logger) {
+	atomic.StorePointer(&globalLogger, unsafe.Pointer(&l))
+}
+
+func getLogger() logr.Logger {
+	return *(*logr.Logger)(atomic.LoadPointer(&globalLogger))
+}
+
+// Info prints messages about the general state of the API or SDK.
+// This should usually be less than 5 messages a minute.
+func Info(msg string, keysAndValues ...interface{}) {
+	getLogger().V(4).Info(msg, keysAndValues...)
+}
+
+// Error prints messages about exceptional states of the API or SDK.
+func Error(err error, msg string, keysAndValues ...interface{}) {
+	getLogger().Error(err, msg, keysAndValues...)
+}
+
+// Debug prints messages about all internal changes in the API or SDK.
+func Debug(msg string, keysAndValues ...interface{}) {
+	getLogger().V(8).Info(msg, keysAndValues...)
+}
+
+// Warn prints messages about warnings in the API or SDK.
+// Not an error but is likely more important than an informational event.
+func Warn(msg string, keysAndValues ...interface{}) {
+	getLogger().V(1).Info(msg, keysAndValues...)
+}
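+
+// Sketch: to surface Warn/Info/Debug messages from this package, install a
+// logr.Logger with sufficient verbosity, e.g. via stdr (levels as documented
+// on SetLogger above):
+//
+//	stdr.SetVerbosity(8) // enable up to Debug
+//	SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags)))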
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/logs.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/logs.go
new file mode 100644
index 0000000000000000000000000000000000000000..ab78fd29b77ba2a8c5911eb1412b952c46cd32ee
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/logs.go
@@ -0,0 +1,128 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package global
+
+import (
+	"github.com/agoda-com/opentelemetry-logs-go/logs"
+	"sync"
+	"sync/atomic"
+)
+
+// loggerProvider is a placeholder for a configured SDK LoggerProvider.
+//
+// All LoggerProvider functionality is forwarded to a delegate once
+// configured.
+type loggerProvider struct {
+	mtx      sync.Mutex
+	loggers  map[il]*logger
+	delegate logs.LoggerProvider
+}
+
+// Compile-time guarantee that loggerProvider implements the LoggerProvider
+// interface.
+var _ logs.LoggerProvider = &loggerProvider{}
+
+// setDelegate configures p to delegate all LoggerProvider functionality to
+// provider.
+//
+// All Loggers provided prior to this function call are switched out to be
+// Loggers provided by provider.
+//
+// It is guaranteed by the caller that this happens only once.
+func (p *loggerProvider) setDelegate(provider logs.LoggerProvider) {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	p.delegate = provider
+
+	if len(p.loggers) == 0 {
+		return
+	}
+
+	for _, t := range p.loggers {
+		t.setDelegate(provider)
+	}
+
+	p.loggers = nil
+}
+
+// Logger implements LoggerProvider.
+func (p *loggerProvider) Logger(name string, opts ...logs.LoggerOption) logs.Logger {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	if p.delegate != nil {
+		return p.delegate.Logger(name, opts...)
+	}
+
+	// At this moment it is guaranteed that no SDK is installed; save the logger in the loggers map.
+
+	c := logs.NewLoggerConfig(opts...)
+	key := il{
+		name:    name,
+		version: c.InstrumentationVersion(),
+	}
+
+	if p.loggers == nil {
+		p.loggers = make(map[il]*logger)
+	}
+
+	if val, ok := p.loggers[key]; ok {
+		return val
+	}
+
+	t := &logger{name: name, opts: opts, provider: p}
+	p.loggers[key] = t
+	return t
+}
+
+type il struct {
+	name    string
+	version string
+}
+
+// logger is a placeholder for a logs.Logger.
+//
+// All Logger functionality is forwarded to a delegate once configured.
+// Otherwise, all functionality is forwarded to a NoopLogger.
+type logger struct {
+	name     string
+	opts     []logs.LoggerOption
+	provider *loggerProvider
+
+	delegate atomic.Value
+}
+
+// Compile-time guarantee that logger implements the logs.Logger interface.
+var _ logs.Logger = &logger{}
+
+func (t *logger) Emit(logRecord logs.LogRecord) {
+	delegate := t.delegate.Load()
+	if delegate != nil {
+		delegate.(logs.Logger).Emit(logRecord)
+	}
+}
+
+// setDelegate configures t to delegate all Logger functionality to Loggers
+// created by provider.
+//
+// All subsequent calls to the Logger methods will be passed to the delegate.
+//
+// It is guaranteed by the caller that this happens only once.
+func (t *logger) setDelegate(provider logs.LoggerProvider) {
+	t.delegate.Store(provider.Logger(t.name, t.opts...))
+}
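+
+// Sketch of the delegation flow implemented above: a Logger obtained before
+// an SDK is installed is a placeholder whose Emit is a no-op, and it starts
+// forwarding once a delegate is set (rec and sdkProvider are assumed values):
+//
+//	l := LoggerProvider().Logger("early")
+//	l.Emit(rec)                    // dropped: no delegate configured yet
+//	SetLoggerProvider(sdkProvider)
+//	l.Emit(rec)                    // forwarded to sdkProvider's logger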
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/state.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/state.go
new file mode 100644
index 0000000000000000000000000000000000000000..53219030ffb90e3a04d3a9fcf4bc67d0b4b77559
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/state.go
@@ -0,0 +1,66 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package global
+
+import (
+	"errors"
+	"github.com/agoda-com/opentelemetry-logs-go/logs"
+	"sync"
+	"sync/atomic"
+)
+
+type (
+	loggerProviderHolder struct {
+		lp logs.LoggerProvider
+	}
+)
+
+var (
+	globalOtelLogger = defaultLoggerValue()
+
+	delegateLoggerOnce sync.Once
+)
+
+// LoggerProvider is the internal implementation for global.LoggerProvider.
+func LoggerProvider() logs.LoggerProvider {
+	return globalOtelLogger.Load().(loggerProviderHolder).lp
+}
+
+// SetLoggerProvider is the internal implementation for global.SetLoggerProvider.
+func SetLoggerProvider(lp logs.LoggerProvider) {
+	current := LoggerProvider()
+
+	if _, cOk := current.(*loggerProvider); cOk {
+		if _, tpOk := lp.(*loggerProvider); tpOk && current == lp {
+			// Do not assign the default delegating LoggerProvider to delegate
+			// to itself.
+			Error(
+				errors.New("no delegate configured in logger provider"),
+				"Setting logger provider to it's current value. No delegate will be configured",
+			)
+			return
+		}
+	}
+
+	globalOtelLogger.Store(loggerProviderHolder{lp: lp})
+}
+
+func defaultLoggerValue() *atomic.Value {
+	v := &atomic.Value{}
+	v.Store(loggerProviderHolder{lp: &loggerProvider{}})
+	return v
+}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/logs.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs.go
new file mode 100644
index 0000000000000000000000000000000000000000..490ba903f31b6757b00e8144ad426620fb673683
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs.go
@@ -0,0 +1,35 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package otel
+
+import (
+	"github.com/agoda-com/opentelemetry-logs-go/internal/global"
+	"github.com/agoda-com/opentelemetry-logs-go/logs"
+)
+
+// GetLoggerProvider returns the registered global logger provider.
+// If none is registered then an instance of NoopLoggerProvider is returned.
+//
+// loggerProvider := otel.GetLoggerProvider()
+func GetLoggerProvider() logs.LoggerProvider {
+	return global.LoggerProvider()
+}
+
+// SetLoggerProvider registers `lp` as the global logger provider.
+func SetLoggerProvider(lp logs.LoggerProvider) {
+	global.SetLoggerProvider(lp)
+}
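+
+// A short usage sketch (the SDK LoggerProvider constructor is an assumption;
+// only GetLoggerProvider and SetLoggerProvider are defined here):
+//
+//	lp := sdk.NewLoggerProvider( /* exporter, processors... */ )
+//	otel.SetLoggerProvider(lp)
+//	logger := otel.GetLoggerProvider().Logger("github.com/example/pkg")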
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/config.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/config.go
new file mode 100644
index 0000000000000000000000000000000000000000..b8ca22273d2ee0252902400a1c7b4c53b4c35c1b
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/config.go
@@ -0,0 +1,89 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logs // Package logs import "github.com/agoda-com/opentelemetry-logs-go/logs"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// LoggerConfig is a group of options for a Logger.
+type LoggerConfig struct {
+	instrumentationVersion string
+	// Schema URL of the telemetry emitted by the Logger.
+	schemaURL string
+	attrs     attribute.Set
+}
+
+// InstrumentationVersion returns the version of the library providing instrumentation.
+func (t *LoggerConfig) InstrumentationVersion() string {
+	return t.instrumentationVersion
+}
+
+// InstrumentationAttributes returns the attributes associated with the library
+// providing instrumentation.
+func (t *LoggerConfig) InstrumentationAttributes() attribute.Set {
+	return t.attrs
+}
+
+// SchemaURL returns the Schema URL of the telemetry emitted by the Logger.
+func (t *LoggerConfig) SchemaURL() string {
+	return t.schemaURL
+}
+
+// NewLoggerConfig applies all the options to a returned LoggerConfig.
+func NewLoggerConfig(options ...LoggerOption) LoggerConfig {
+	var config LoggerConfig
+	for _, option := range options {
+		config = option.apply(config)
+	}
+	return config
+}
+
+// LoggerOption applies an option to a LoggerConfig.
+type LoggerOption interface {
+	apply(LoggerConfig) LoggerConfig
+}
+
+type loggerOptionFunc func(LoggerConfig) LoggerConfig
+
+func (fn loggerOptionFunc) apply(cfg LoggerConfig) LoggerConfig {
+	return fn(cfg)
+}
+
+// WithInstrumentationVersion sets the instrumentation version.
+func WithInstrumentationVersion(version string) LoggerOption {
+	return loggerOptionFunc(func(cfg LoggerConfig) LoggerConfig {
+		cfg.instrumentationVersion = version
+		return cfg
+	})
+}
+
+// WithInstrumentationAttributes sets the instrumentation attributes.
+//
+// The passed attributes will be de-duplicated.
+func WithInstrumentationAttributes(attr ...attribute.KeyValue) LoggerOption {
+	return loggerOptionFunc(func(config LoggerConfig) LoggerConfig {
+		config.attrs = attribute.NewSet(attr...)
+		return config
+	})
+}
+
+// WithSchemaURL sets the schema URL for the Logger.
+func WithSchemaURL(schemaURL string) LoggerOption {
+	return loggerOptionFunc(func(cfg LoggerConfig) LoggerConfig {
+		cfg.schemaURL = schemaURL
+		return cfg
+	})
+}
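+
+// Illustrative use of these options when obtaining a Logger (the provider
+// value is assumed):
+//
+//	logger := provider.Logger(
+//		"github.com/example/instrumented/pkg",
+//		WithInstrumentationVersion("1.2.3"),
+//		WithSchemaURL("https://opentelemetry.io/schemas/1.21.0"),
+//	)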
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/doc.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..db5f43546fd120f618077ae1b5ec3b9023938040
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/doc.go
@@ -0,0 +1,68 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package logs provides an implementation of the logging part of the
+OpenTelemetry API.
+
+This package defines a log backend API. The API is not intended to be called by application developers directly.
+It is provided for logging library authors to build log appenders, which use this API to bridge between existing
+logging libraries and the OpenTelemetry log data model.
+
+To participate in logging, a LogRecord needs to be created for the
+operation being performed as part of a logging workflow. In its simplest form:
+
+	var logger logs.Logger
+
+	func init() {
+		logger = otel.GetLoggerProvider().Logger("example")
+	}
+
+	func operation(ctx context.Context) {
+		logRecord := logs.NewLogRecord(logs.LogRecordConfig{})
+		logger.Emit(logRecord)
+	}
+
+A Logger is unique to the instrumentation and is used to create Logs.
+Instrumentation should be designed to accept a LoggerProvider from which it
+can create its own unique Logger. Alternatively, the registered global
+LoggerProvider from the github.com/agoda-com/opentelemetry-logs-go package can be used as
+a default.
+
+	const (
+		name    = "instrumentation/package/name"
+		version = "0.1.0"
+	)
+
+	type Instrumentation struct {
+		logger logs.Logger
+	}
+
+	func NewInstrumentation(lp logs.LoggerProvider) *Instrumentation {
+		if lp == nil {
+			lp = otel.GetLoggerProvider()
+		}
+		return &Instrumentation{
+			logger: lp.Logger(name, logs.WithInstrumentationVersion(version)),
+		}
+	}
+
+	func operation(ctx context.Context, inst *Instrumentation) {
+
+		// ...
+	}
+*/
+package logs // import "github.com/agoda-com/opentelemetry-logs-go/logs"
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/logs.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/logs.go
new file mode 100644
index 0000000000000000000000000000000000000000..cfe454e4b61ef6a16830539cf81b4a9cdd8ad956
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/logs.go
@@ -0,0 +1,167 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logs
+
+import (
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	"go.opentelemetry.io/otel/trace"
+	"time"
+)
+
+// LogRecordConfig contains mutable fields usable for constructing
+// an immutable LogRecord.
+type LogRecordConfig struct {
+	Timestamp            *time.Time
+	ObservedTimestamp    time.Time
+	TraceId              *trace.TraceID
+	SpanId               *trace.SpanID
+	TraceFlags           *trace.TraceFlags
+	SeverityText         *string
+	SeverityNumber       *SeverityNumber
+	Body                 *string
+	Resource             *resource.Resource
+	InstrumentationScope *instrumentation.Scope
+	Attributes           *[]attribute.KeyValue
+}
+
+// NewLogRecord constructs a LogRecord using values from the provided
+// LogRecordConfig.
+func NewLogRecord(config LogRecordConfig) LogRecord {
+	return LogRecord{
+		timestamp:            config.Timestamp,
+		observedTimestamp:    config.ObservedTimestamp,
+		traceId:              config.TraceId,
+		spanId:               config.SpanId,
+		traceFlags:           config.TraceFlags,
+		severityText:         config.SeverityText,
+		severityNumber:       config.SeverityNumber,
+		body:                 config.Body,
+		resource:             config.Resource,
+		instrumentationScope: config.InstrumentationScope,
+		attributes:           config.Attributes,
+	}
+}
+
+// LogRecord is an implementation of the OpenTelemetry Log API
+// representing an individual log record.
+// see https://opentelemetry.io/docs/specs/otel/logs/data-model/#log-and-event-record-definition
+type LogRecord struct {
+	timestamp            *time.Time
+	observedTimestamp    time.Time
+	traceId              *trace.TraceID
+	spanId               *trace.SpanID
+	traceFlags           *trace.TraceFlags
+	severityText         *string
+	severityNumber       *SeverityNumber
+	body                 *string
+	resource             *resource.Resource
+	instrumentationScope *instrumentation.Scope
+	attributes           *[]attribute.KeyValue
+}
+
+func (l LogRecord) Timestamp() *time.Time                        { return l.timestamp }
+func (l LogRecord) ObservedTimestamp() time.Time                 { return l.observedTimestamp }
+func (l LogRecord) TraceId() *trace.TraceID                      { return l.traceId }
+func (l LogRecord) SpanId() *trace.SpanID                        { return l.spanId }
+func (l LogRecord) TraceFlags() *trace.TraceFlags                { return l.traceFlags }
+func (l LogRecord) SeverityText() *string                        { return l.severityText }
+func (l LogRecord) SeverityNumber() *SeverityNumber              { return l.severityNumber }
+func (l LogRecord) Body() *string                                { return l.body }
+func (l LogRecord) Resource() *resource.Resource                 { return l.resource }
+func (l LogRecord) InstrumentationScope() *instrumentation.Scope { return l.instrumentationScope }
+func (l LogRecord) Attributes() *[]attribute.KeyValue            { return l.attributes }
+func (l LogRecord) private()                                     {}
+
+// SeverityNumber defines the possible values for LogRecord.SeverityNumber.
+type SeverityNumber int32
+
+const (
+	// UNSPECIFIED is the default SeverityNumber; it MUST NOT be used.
+	UNSPECIFIED SeverityNumber = 0
+	TRACE       SeverityNumber = 1
+	TRACE2      SeverityNumber = 2
+	TRACE3      SeverityNumber = 3
+	TRACE4      SeverityNumber = 4
+	DEBUG       SeverityNumber = 5
+	DEBUG2      SeverityNumber = 6
+	DEBUG3      SeverityNumber = 7
+	DEBUG4      SeverityNumber = 8
+	INFO        SeverityNumber = 9
+	INFO2       SeverityNumber = 10
+	INFO3       SeverityNumber = 11
+	INFO4       SeverityNumber = 12
+	WARN        SeverityNumber = 13
+	WARN2       SeverityNumber = 14
+	WARN3       SeverityNumber = 15
+	WARN4       SeverityNumber = 16
+	ERROR       SeverityNumber = 17
+	ERROR2      SeverityNumber = 18
+	ERROR3      SeverityNumber = 19
+	ERROR4      SeverityNumber = 20
+	FATAL       SeverityNumber = 21
+	FATAL2      SeverityNumber = 22
+	FATAL3      SeverityNumber = 23
+	FATAL4      SeverityNumber = 24
+)
+
+// Logger is the creator of Logs.
+type Logger interface {
+	// Emit emits a log record
+	Emit(logRecord LogRecord)
+}
+
+// LoggerProvider provides Loggers that are used by instrumentation code to
+// log computational workflows.
+//
+// A LoggerProvider is the collection destination of the logs its Loggers
+// provide; it represents a unique telemetry collection pipeline. How that
+// pipeline is defined, meaning how those Logs are collected, processed, and
+// where they are exported, depends on its implementation. Instrumentation
+// authors do not need to define this implementation; rather, they just use
+// the provided Loggers to instrument code.
+type LoggerProvider interface {
+	// Logger returns a unique Logger scoped to be used by instrumentation code
+	// to log computational workflows. The scope and identity of that
+	// instrumentation code is uniquely defined by the name and options passed.
+	//
+	// The passed name needs to uniquely identify instrumentation code.
+	// Therefore, it is recommended that name is the Go package name of the
+	// library providing instrumentation (note: not the code being
+	// instrumented). Instrumentation libraries can have multiple versions,
+	// therefore, the WithInstrumentationVersion option should be used to
+	// distinguish these different codebases. Additionally, instrumentation
+	// libraries may sometimes use traces to communicate different domains of
+	// workflow data (i.e. using logs to communicate workflow events only). If
+	// this is the case, the WithScopeAttributes option should be used to
+	// uniquely identify Loggers that handle the different domains of workflow
+	// data.
+	//
+	// If the same name and options are passed multiple times, the same Logger
+	// will be returned (it is up to the implementation if this will be the
+	// same underlying instance of that Logger or not). It is not necessary to
+	// call this multiple times with the same name and options to get an
+	// up-to-date Logger. All implementations will ensure any LoggerProvider
+	// configuration changes are propagated to all provided Loggers.
+	//
+	// If name is empty, then an implementation defined default name will be
+	// used instead.
+	//
+	// This method is safe to call concurrently.
+	Logger(name string, options ...LoggerOption) Logger
+}
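+
+// Sketch: constructing and emitting a record with NewLogRecord. The body and
+// severity values are illustrative, and logger is an assumed logs.Logger:
+//
+//	body := "something happened"
+//	sev := INFO
+//	rec := NewLogRecord(LogRecordConfig{
+//		ObservedTimestamp: time.Now(),
+//		SeverityNumber:    &sev,
+//		Body:              &body,
+//	})
+//	logger.Emit(rec)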
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/noop.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/noop.go
new file mode 100644
index 0000000000000000000000000000000000000000..3f4bb1d2dedc8d3196a4455fd0a9a7487b283dbc
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/noop.go
@@ -0,0 +1,38 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logs
+
+// NewNoopLoggerProvider returns an implementation of LoggerProvider that
+// performs no operations. The Loggers created from the returned
+// LoggerProvider also perform no operations.
+func NewNoopLoggerProvider() LoggerProvider {
+	return noopLoggerProvider{}
+}
+
+type noopLoggerProvider struct{}
+
+var _ LoggerProvider = noopLoggerProvider{}
+
+func (p noopLoggerProvider) Logger(string, ...LoggerOption) Logger {
+	return noopLogger{}
+}
+
+type noopLogger struct{}
+
+var _ Logger = noopLogger{}
+
+func (n noopLogger) Emit(logRecord LogRecord) {}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/internal/env/env.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/internal/env/env.go
new file mode 100644
index 0000000000000000000000000000000000000000..80fff067599037e081e1053aa804107a14680882
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/internal/env/env.go
@@ -0,0 +1,105 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package env
+
+import (
+	"os"
+	"strconv"
+)
+
+// Environment variable names.
+const (
+	// BatchLogsProcessorScheduleDelayKey is the delay interval in milliseconds
+	// between two consecutive exports (default: 5000).
+	BatchLogsProcessorScheduleDelayKey = "OTEL_BLRP_SCHEDULE_DELAY"
+	// BatchLogsProcessorExportTimeoutKey is the maximum allowed time in
+	// milliseconds to export data (default: 30000).
+	BatchLogsProcessorExportTimeoutKey = "OTEL_BLRP_EXPORT_TIMEOUT"
+	// BatchLogsProcessorMaxQueueSizeKey is the maximum queue size (default: 2048).
+	BatchLogsProcessorMaxQueueSizeKey = "OTEL_BLRP_MAX_QUEUE_SIZE"
+	// BatchLogsProcessorMaxExportBatchSizeKey is the maximum batch size
+	// (default: 512). Note: it must be less than or equal to the value of
+	// BatchLogsProcessorMaxQueueSizeKey.
+	BatchLogsProcessorMaxExportBatchSizeKey = "OTEL_BLRP_MAX_EXPORT_BATCH_SIZE"
+)
+
+// firstInt returns the value of the first matching environment variable from
+// keys. If the value is not an integer or no match is found, defaultValue is
+// returned.
+func firstInt(defaultValue int, keys ...string) int {
+	for _, key := range keys {
+		value := os.Getenv(key)
+		if value == "" {
+			continue
+		}
+
+		intValue, err := strconv.Atoi(value)
+		if err != nil {
+			//envconfig.Info("Got invalid value, number value expected.", key, value)
+			return defaultValue
+		}
+
+		return intValue
+	}
+
+	return defaultValue
+}
+
+// IntEnvOr returns the int value of the environment variable with name key if
+// it exists, is not empty, and the value is an int. Otherwise, defaultValue is returned.
+func IntEnvOr(key string, defaultValue int) int {
+	value := os.Getenv(key)
+	if value == "" {
+		return defaultValue
+	}
+
+	intValue, err := strconv.Atoi(value)
+	if err != nil {
+		//global.Info("Got invalid value, number value expected.", key, value)
+		return defaultValue
+	}
+
+	return intValue
+}
+
+// BatchLogsProcessorScheduleDelay returns the environment variable value for
+// the OTEL_BLRP_SCHEDULE_DELAY key if it exists, otherwise defaultValue is
+// returned.
+func BatchLogsProcessorScheduleDelay(defaultValue int) int {
+	return IntEnvOr(BatchLogsProcessorScheduleDelayKey, defaultValue)
+}
+
+// BatchLogsProcessorExportTimeout returns the environment variable value for
+// the OTEL_BLRP_EXPORT_TIMEOUT key if it exists, otherwise defaultValue is
+// returned.
+func BatchLogsProcessorExportTimeout(defaultValue int) int {
+	return IntEnvOr(BatchLogsProcessorExportTimeoutKey, defaultValue)
+}
+
+// BatchLogsProcessorMaxQueueSize returns the environment variable value for
+// the OTEL_BLRP_MAX_QUEUE_SIZE key if it exists, otherwise defaultValue is
+// returned.
+func BatchLogsProcessorMaxQueueSize(defaultValue int) int {
+	return IntEnvOr(BatchLogsProcessorMaxQueueSizeKey, defaultValue)
+}
+
+// BatchLogsProcessorMaxExportBatchSize returns the environment variable value for
+// the OTEL_BLRP_MAX_EXPORT_BATCH_SIZE key if it exists, otherwise defaultValue
+// is returned.
+func BatchLogsProcessorMaxExportBatchSize(defaultValue int) int {
+	return IntEnvOr(BatchLogsProcessorMaxExportBatchSizeKey, defaultValue)
+}
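+
+// For example, with the queue-size variable set, the helper above returns the
+// parsed value instead of the default (a sketch):
+//
+//	os.Setenv(BatchLogsProcessorMaxQueueSizeKey, "4096")
+//	n := BatchLogsProcessorMaxQueueSize(2048) // n == 4096; 2048 if unset or invalid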
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/batch_log_record_processor.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/batch_log_record_processor.go
new file mode 100644
index 0000000000000000000000000000000000000000..0691f6ac679bbee9f1d2a0337db46c0a46819cba
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/batch_log_record_processor.go
@@ -0,0 +1,435 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logs
+
+import (
+	"context"
+	"github.com/agoda-com/opentelemetry-logs-go/sdk/internal/env"
+	"go.opentelemetry.io/otel"
+	"runtime"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// Defaults for BatchLogRecordProcessorOptions.
+const (
+	DefaultMaxQueueSize       = 2048
+	DefaultScheduleDelay      = 5000
+	DefaultExportTimeout      = 30000
+	DefaultMaxExportBatchSize = 512
+)
+
+// BatchLogRecordProcessorOption configures a BatchLogsProcessor.
+type BatchLogRecordProcessorOption func(o *BatchLogRecordProcessorOptions)
+
+// BatchLogRecordProcessorOptions is configuration settings for a
+// BatchLogsProcessor.
+type BatchLogRecordProcessorOptions struct {
+	// MaxQueueSize is the maximum queue size to buffer logs for delayed processing. If the
+	// queue gets full it drops the logs. Use BlockOnQueueFull to change this behavior.
+	// The default value of MaxQueueSize is 2048.
+	MaxQueueSize int
+
+	// BatchTimeout is the maximum duration for constructing a batch. The
+	// processor forcefully sends available logs when the timeout is reached.
+	// The default value of BatchTimeout is 5000 msec.
+	BatchTimeout time.Duration
+
+	// ExportTimeout specifies the maximum duration for exporting logs. If the timeout
+	// is reached, the export will be cancelled.
+	// The default value of ExportTimeout is 30000 msec.
+	ExportTimeout time.Duration
+
+	// MaxExportBatchSize is the maximum number of logs to process in a single batch.
+	// If there is more than one batch worth of logs then multiple batches
+	// of logs are processed one after the other without any delay.
+	// The default value of MaxExportBatchSize is 512.
+	MaxExportBatchSize int
+
+	// BlockOnQueueFull blocks the OnEmit method if the queue is full
+	// AND if BlockOnQueueFull is set to true.
+	// Blocking option should be used carefully as it can severely affect the performance of an
+	// application.
+	BlockOnQueueFull bool
+}
+
+// WithMaxQueueSize returns a BatchLogRecordProcessorOption that configures the
+// maximum queue size allowed for a BatchLogRecordProcessor.
+func WithMaxQueueSize(size int) BatchLogRecordProcessorOption {
+	return func(o *BatchLogRecordProcessorOptions) {
+		o.MaxQueueSize = size
+	}
+}
+
+// WithMaxExportBatchSize returns a BatchLogRecordProcessorOption that configures
+// the maximum export batch size allowed for a BatchLogRecordProcessor.
+func WithMaxExportBatchSize(size int) BatchLogRecordProcessorOption {
+	return func(o *BatchLogRecordProcessorOptions) {
+		o.MaxExportBatchSize = size
+	}
+}
+
+// WithBatchTimeout returns a BatchLogRecordProcessorOption that configures the
+// maximum delay allowed for a BatchLogRecordProcessor before it will export any
+// held log (whether the queue is full or not).
+func WithBatchTimeout(delay time.Duration) BatchLogRecordProcessorOption {
+	return func(o *BatchLogRecordProcessorOptions) {
+		o.BatchTimeout = delay
+	}
+}
+
+// WithExportTimeout returns a BatchLogRecordProcessorOption that configures the
+// amount of time a BatchLogRecordProcessor waits for an exporter to export before
+// abandoning the export.
+func WithExportTimeout(timeout time.Duration) BatchLogRecordProcessorOption {
+	return func(o *BatchLogRecordProcessorOptions) {
+		o.ExportTimeout = timeout
+	}
+}
+
+// WithBlocking returns a BatchLogRecordProcessorOption that configures a
+// BatchLogRecordProcessor to wait for enqueue operations to succeed instead of
+// dropping data when the queue is full.
+func WithBlocking() BatchLogRecordProcessorOption {
+	return func(o *BatchLogRecordProcessorOptions) {
+		o.BlockOnQueueFull = true
+	}
+}
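+
+// Sketch: wiring a processor with the options above (the exporter value is
+// assumed):
+//
+//	lrp := NewBatchLogRecordProcessor(exporter,
+//		WithMaxQueueSize(4096),
+//		WithBatchTimeout(2*time.Second),
+//		WithBlocking(),
+//	)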
+
+// batchLogRecordProcessor is a LogRecordProcessor that batches asynchronously-received
+// logs and sends them to a LogRecordExporter when complete.
+type batchLogRecordProcessor struct {
+	e LogRecordExporter
+	o BatchLogRecordProcessorOptions
+
+	queue   chan ReadableLogRecord
+	dropped uint32
+
+	batch      []ReadableLogRecord
+	batchMutex sync.Mutex
+	timer      *time.Timer
+	stopWait   sync.WaitGroup
+	stopOnce   sync.Once
+	stopCh     chan struct{}
+	stopped    atomic.Bool
+}
+
+func (lrp *batchLogRecordProcessor) Shutdown(ctx context.Context) error {
+	var err error
+	lrp.stopOnce.Do(func() {
+		lrp.stopped.Store(true)
+		wait := make(chan struct{})
+		go func() {
+			close(lrp.stopCh)
+			lrp.stopWait.Wait()
+			if lrp.e != nil {
+				if err := lrp.e.Shutdown(ctx); err != nil {
+					otel.Handle(err)
+				}
+			}
+			close(wait)
+		}()
+		// Wait until the wait group is done or the context is cancelled
+		select {
+		case <-wait:
+		case <-ctx.Done():
+			err = ctx.Err()
+		}
+	})
+	return err
+}
+
+var _ LogRecordProcessor = (*batchLogRecordProcessor)(nil)
+
+// NewBatchLogRecordProcessor creates a new LogRecordProcessor that will send completed
+// log batches to the exporter with the supplied options.
+//
+// If the exporter is nil, the logs processor will perform no action.
+// see https://opentelemetry.io/docs/specs/otel/logs/sdk/#batching-processor
+func NewBatchLogRecordProcessor(exporter LogRecordExporter, options ...BatchLogRecordProcessorOption) LogRecordProcessor {
+	maxQueueSize := env.BatchLogsProcessorMaxQueueSize(DefaultMaxQueueSize)
+	maxExportBatchSize := env.BatchLogsProcessorMaxExportBatchSize(DefaultMaxExportBatchSize)
+
+	if maxExportBatchSize > maxQueueSize {
+		if DefaultMaxExportBatchSize > maxQueueSize {
+			maxExportBatchSize = maxQueueSize
+		} else {
+			maxExportBatchSize = DefaultMaxExportBatchSize
+		}
+	}
+
+	o := BatchLogRecordProcessorOptions{
+		BatchTimeout:       time.Duration(env.BatchLogsProcessorScheduleDelay(DefaultScheduleDelay)) * time.Millisecond,
+		ExportTimeout:      time.Duration(env.BatchLogsProcessorExportTimeout(DefaultExportTimeout)) * time.Millisecond,
+		MaxQueueSize:       maxQueueSize,
+		MaxExportBatchSize: maxExportBatchSize,
+	}
+	for _, opt := range options {
+		opt(&o)
+	}
+	blp := &batchLogRecordProcessor{
+		e:      exporter,
+		o:      o,
+		batch:  make([]ReadableLogRecord, 0, o.MaxExportBatchSize),
+		timer:  time.NewTimer(o.BatchTimeout),
+		queue:  make(chan ReadableLogRecord, o.MaxQueueSize),
+		stopCh: make(chan struct{}),
+	}
+
+	blp.stopWait.Add(1)
+	go func() {
+		defer blp.stopWait.Done()
+		blp.processQueue()
+		blp.drainQueue()
+	}()
+
+	return blp
+}
+
+func (lrp *batchLogRecordProcessor) OnEmit(rol ReadableLogRecord) {
+
+	// Do not enqueue logs after Shutdown.
+	if lrp.stopped.Load() {
+		return
+	}
+	// Do not enqueue logs if we are just going to drop them.
+	if lrp.e == nil {
+		return
+	}
+
+	lrp.enqueue(rol)
+}
+
+type forceFlushLogs struct {
+	ReadableLogRecord
+	flushed chan struct{}
+}
+
+// processQueue removes logs from the `queue` channel until processor
+// is shut down. It calls the exporter in batches of up to MaxExportBatchSize
+// waiting up to BatchTimeout to form a batch.
+func (lrp *batchLogRecordProcessor) processQueue() {
+	defer lrp.timer.Stop()
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	for {
+		select {
+		case <-lrp.stopCh:
+			return
+		case <-lrp.timer.C:
+			if err := lrp.exportLogs(ctx); err != nil {
+				otel.Handle(err)
+			}
+		case sd := <-lrp.queue:
+			if ffs, ok := sd.(forceFlushLogs); ok {
+				close(ffs.flushed)
+				continue
+			}
+			lrp.batchMutex.Lock()
+			lrp.batch = append(lrp.batch, sd)
+			shouldExport := len(lrp.batch) >= lrp.o.MaxExportBatchSize
+			lrp.batchMutex.Unlock()
+			if shouldExport {
+				if !lrp.timer.Stop() {
+					<-lrp.timer.C
+				}
+				if err := lrp.exportLogs(ctx); err != nil {
+					otel.Handle(err)
+				}
+			}
+		}
+	}
+}
+
+// drainQueue waits for any callers that had added to lrp.stopWait
+// to finish enqueueing, then exports the final batch.
+func (lrp *batchLogRecordProcessor) drainQueue() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	for {
+		select {
+		case sd := <-lrp.queue:
+			if sd == nil {
+				if err := lrp.exportLogs(ctx); err != nil {
+					otel.Handle(err)
+				}
+				return
+			}
+
+			lrp.batchMutex.Lock()
+			lrp.batch = append(lrp.batch, sd)
+			shouldExport := len(lrp.batch) == lrp.o.MaxExportBatchSize
+			lrp.batchMutex.Unlock()
+
+			if shouldExport {
+				if err := lrp.exportLogs(ctx); err != nil {
+					otel.Handle(err)
+				}
+			}
+		default:
+			close(lrp.queue)
+		}
+	}
+}
+
+// exportLogs is a subroutine of processing and draining the queue.
+func (lrp *batchLogRecordProcessor) exportLogs(ctx context.Context) error {
+	lrp.timer.Reset(lrp.o.BatchTimeout)
+
+	lrp.batchMutex.Lock()
+	defer lrp.batchMutex.Unlock()
+
+	if lrp.o.ExportTimeout > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, lrp.o.ExportTimeout)
+		defer cancel()
+	}
+
+	if l := len(lrp.batch); l > 0 {
+		//global.Debug("exporting logs", "count", len(lrp.batch), "total_dropped", atomic.LoadUint32(&lrp.dropped))
+		err := lrp.e.Export(ctx, lrp.batch)
+
+		// A new batch is always created after exporting, even if the batch failed to be exported.
+		//
+		// It is up to the exporter to implement any type of retry logic if a batch is failing
+		// to be exported, since it is specific to the protocol and backend being sent to.
+		lrp.batch = lrp.batch[:0]
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (lrp *batchLogRecordProcessor) enqueue(sd ReadableLogRecord) {
+	ctx := context.TODO()
+	if lrp.o.BlockOnQueueFull {
+		lrp.enqueueBlockOnQueueFull(ctx, sd)
+	} else {
+		lrp.enqueueDrop(ctx, sd)
+	}
+}
+
+// ForceFlush exports all ended logs that have not yet been exported.
+func (lrp *batchLogRecordProcessor) ForceFlush(ctx context.Context) error {
+
+	// Interrupt if context is already canceled.
+	if err := ctx.Err(); err != nil {
+		return err
+	}
+	// Do nothing and do not enqueue after Shutdown.
+	if lrp.stopped.Load() {
+		return nil
+	}
+
+	var err error
+	if lrp.e != nil {
+		flushCh := make(chan struct{})
+		if lrp.enqueueBlockOnQueueFull(ctx, forceFlushLogs{flushed: flushCh}) {
+			select {
+			case <-flushCh:
+				// Processed any items in queue prior to ForceFlush being called
+			case <-ctx.Done():
+				return ctx.Err()
+			}
+		}
+
+		wait := make(chan error)
+		go func() {
+			wait <- lrp.exportLogs(ctx)
+			close(wait)
+		}()
+		// Wait until the export is finished or the context is cancelled/timed out
+		select {
+		case err = <-wait:
+		case <-ctx.Done():
+			err = ctx.Err()
+		}
+	}
+	return err
+}
+
+func recoverSendOnClosedChan() {
+	x := recover()
+	switch err := x.(type) {
+	case nil:
+		return
+	case runtime.Error:
+		if err.Error() == "send on closed channel" {
+			return
+		}
+	}
+	panic(x)
+}
+
+func (lrp *batchLogRecordProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadableLogRecord) bool {
+
+	// This ensures the lrp.queue<- below does not panic as the
+	// processor shuts down.
+	defer recoverSendOnClosedChan()
+
+	select {
+	case <-lrp.stopCh:
+		return false
+	default:
+	}
+
+	select {
+	case lrp.queue <- sd:
+		return true
+	case <-ctx.Done():
+		return false
+	}
+}
+
+func (lrp *batchLogRecordProcessor) enqueueDrop(ctx context.Context, ld ReadableLogRecord) bool {
+
+	// This ensures the lrp.queue<- below does not panic as the
+	// processor shuts down.
+	defer recoverSendOnClosedChan()
+
+	select {
+	case <-lrp.stopCh:
+		return false
+	default:
+	}
+
+	select {
+	case lrp.queue <- ld:
+		return true
+	default:
+		atomic.AddUint32(&lrp.dropped, 1)
+	}
+	return false
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this exporter.
+func (lrp *batchLogRecordProcessor) MarshalLog() interface{} {
+	return struct {
+		Type              string
+		LogRecordExporter LogRecordExporter
+		Config            BatchLogRecordProcessorOptions
+	}{
+		Type:              "BatchLogRecordProcessor",
+		LogRecordExporter: lrp.e,
+		Config:            lrp.o,
+	}
+}
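Reviewer note: to make the batching contract above concrete, here is a minimal sketch of wiring the processor to an exporter. `NewBatchLogRecordProcessor`, `NewLoggerProvider`, and `WithLogRecordProcessor` appear later in this diff; the option constructor names (`WithMaxExportBatchSize`, `WithBatchTimeout`) are assumed to mirror the OTel trace SDK and are not shown here.

```go
package logsdemo

import (
	"context"
	"time"

	sdk "github.com/agoda-com/opentelemetry-logs-go/sdk/logs"
)

// newBatchingProvider wires a batching processor to an exporter. Records are
// exported when the batch reaches MaxExportBatchSize or BatchTimeout fires.
func newBatchingProvider(exp sdk.LogRecordExporter) *sdk.LoggerProvider {
	bp := sdk.NewBatchLogRecordProcessor(exp,
		sdk.WithMaxExportBatchSize(512),     // assumed option name
		sdk.WithBatchTimeout(5*time.Second), // assumed option name
	)
	return sdk.NewLoggerProvider(sdk.WithLogRecordProcessor(bp))
}

// shutdown drains the queue and exports the final batch, bounded by a timeout.
func shutdown(ctx context.Context, lp *sdk.LoggerProvider) error {
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	return lp.Shutdown(ctx)
}
```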
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/log_record_exporter.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/log_record_exporter.go
new file mode 100644
index 0000000000000000000000000000000000000000..295270847e39b3addfe8dd09e54bfffef0a05880
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/log_record_exporter.go
@@ -0,0 +1,53 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logs
+
+import (
+	"context"
+)
+
+// LogRecordExporter is the interface that log exporters implement.
+// see https://opentelemetry.io/docs/specs/otel/logs/sdk/#logrecordexporter
+type LogRecordExporter interface {
+
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Export exports a batch of logs.
+	//
+	// This function is called synchronously, so there is no concurrency
+	// safety requirement. However, due to the synchronous calling pattern,
+	// it is critical that all timeouts and cancellations contained in the
+	// passed context must be honored.
+	//
+	// Any retry logic must be contained in this function. The SDK that
+	// calls this function will not implement any retry logic. All errors
+	// returned by this function are considered unrecoverable and will be
+	// reported to a configured error Handler.
+	Export(ctx context.Context, batch []ReadableLogRecord) error
+
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Shutdown notifies the exporter of a pending halt to operations. The
+	// exporter is expected to perform any cleanup or synchronization it
+	// requires while honoring all timeouts and cancellations contained in
+	// the passed context.
+	Shutdown(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
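Reviewer note: a minimal sketch of an exporter satisfying the contract above. It honors the passed context and performs no retries, leaving retry policy to the exporter layer as the interface comments require; the stdout sink is an example, not part of the library.

```go
package logsdemo

import (
	"context"
	"fmt"

	sdk "github.com/agoda-com/opentelemetry-logs-go/sdk/logs"
)

type stdoutExporter struct{}

var _ sdk.LogRecordExporter = stdoutExporter{}

// Export prints each record body, checking for cancellation between records
// so timeouts in ctx are honored as the interface demands.
func (stdoutExporter) Export(ctx context.Context, batch []sdk.ReadableLogRecord) error {
	for _, r := range batch {
		if err := ctx.Err(); err != nil {
			return err
		}
		if b := r.Body(); b != nil {
			fmt.Println(*b)
		}
	}
	return nil
}

// Shutdown has nothing to clean up for a stdout sink.
func (stdoutExporter) Shutdown(ctx context.Context) error { return nil }
```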
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/log_record_processor.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/log_record_processor.go
new file mode 100644
index 0000000000000000000000000000000000000000..c924ccef21ba1ba2aacc384e662a203f04db6676
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/log_record_processor.go
@@ -0,0 +1,67 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logs
+
+import (
+	"context"
+	"sync"
+)
+
+// LogRecordProcessor is an interface which allows hooks for LogRecord emitting.
+// see https://opentelemetry.io/docs/specs/otel/logs/sdk/#logrecordprocessor
+type LogRecordProcessor interface {
+
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// OnEmit is called when logs are emitted. It is called synchronously
+	// and hence must not block.
+	OnEmit(rol ReadableLogRecord)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Shutdown is called when the SDK shuts down. Any cleanup or release of
+	// resources held by the processor should be done in this call.
+	//
+	// Calls to OnEmit or ForceFlush after this has been called
+	// should be ignored.
+	//
+	// All timeouts and cancellations contained in ctx must be honored, this
+	// should not block indefinitely.
+	Shutdown(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// ForceFlush exports all ended logs to the configured Exporter that have not yet
+	// been exported.  It should only be called when absolutely necessary, such as when
+	// using a FaaS provider that may suspend the process after an invocation, but before
+	// the Processor can export the completed logs.
+	ForceFlush(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+type logRecordProcessorState struct {
+	lp    LogRecordProcessor
+	state sync.Once
+}
+
+func newLogsProcessorState(lp LogRecordProcessor) *logRecordProcessorState {
+	return &logRecordProcessorState{lp: lp}
+}
+
+type logRecordProcessorStates []*logRecordProcessorState
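Reviewer note: a sketch of a custom LogRecordProcessor built on the interface above, shown here only to illustrate the OnEmit/Shutdown/ForceFlush contract. The counting wrapper is hypothetical; OnEmit stays cheap because it must not block.

```go
package logsdemo

import (
	"context"
	"sync/atomic"

	sdk "github.com/agoda-com/opentelemetry-logs-go/sdk/logs"
)

// countingProcessor counts records and delegates to the next processor.
type countingProcessor struct {
	next  sdk.LogRecordProcessor
	count atomic.Int64
}

var _ sdk.LogRecordProcessor = (*countingProcessor)(nil)

// OnEmit must not block; an atomic increment plus delegation keeps it cheap.
func (p *countingProcessor) OnEmit(r sdk.ReadableLogRecord) {
	p.count.Add(1)
	p.next.OnEmit(r)
}

func (p *countingProcessor) Shutdown(ctx context.Context) error   { return p.next.Shutdown(ctx) }
func (p *countingProcessor) ForceFlush(ctx context.Context) error { return p.next.ForceFlush(ctx) }
```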
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/logger.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/logger.go
new file mode 100644
index 0000000000000000000000000000000000000000..37eeb0675fc21cdde9dab58407271e4a409e3ce2
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/logger.go
@@ -0,0 +1,191 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logs
+
+import (
+	"github.com/agoda-com/opentelemetry-logs-go/logs"
+	"github.com/agoda-com/opentelemetry-logs-go/semconv"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	"go.opentelemetry.io/otel/trace"
+	"sync"
+	"time"
+)
+
+type logger struct {
+	provider             *LoggerProvider
+	instrumentationScope instrumentation.Scope
+}
+
+var _ logs.Logger = &logger{}
+
+func (l logger) Emit(logRecord logs.LogRecord) {
+	lps := l.provider.getLogRecordProcessorStates()
+	if len(lps) == 0 {
+		return
+	}
+
+	pr, err := resource.Merge(l.provider.resource, logRecord.Resource())
+	if err != nil {
+		return
+	}
+
+	elr := &exportableLogRecord{
+		timestamp:            logRecord.Timestamp(),
+		observedTimestamp:    logRecord.ObservedTimestamp(),
+		traceId:              logRecord.TraceId(),
+		spanId:               logRecord.SpanId(),
+		traceFlags:           logRecord.TraceFlags(),
+		severityText:         logRecord.SeverityText(),
+		severityNumber:       logRecord.SeverityNumber(),
+		body:                 logRecord.Body(),
+		resource:             pr,
+		instrumentationScope: logRecord.InstrumentationScope(),
+		attributes:           logRecord.Attributes(),
+	}
+
+	for _, lp := range lps {
+		lp.lp.OnEmit(elr)
+	}
+}
+
+// ReadableLogRecord Log structure
+// see https://opentelemetry.io/docs/specs/otel/logs/data-model/#log-and-event-record-definition
+// see https://opentelemetry.io/docs/specs/otel/logs/sdk/#readablelogrecord
+type ReadableLogRecord interface {
+	// Timestamp Time when the event occurred.
+	Timestamp() *time.Time
+	// ObservedTimestamp Time when the event was observed.
+	ObservedTimestamp() time.Time
+	// TraceId Request trace id.
+	TraceId() *trace.TraceID
+	// SpanId Request span id.
+	SpanId() *trace.SpanID
+	// TraceFlags W3C trace flag.
+	TraceFlags() *trace.TraceFlags
+	// SeverityText This is the original string representation of the severityNumber as it is known at the source
+	SeverityText() *string
+	// SeverityNumber Numerical value of the severityNumber.
+	SeverityNumber() *logs.SeverityNumber
+	// Body The body of the log record.
+	Body() *string
+	// Resource Describes the source of the log.
+	Resource() *resource.Resource
+	// InstrumentationScope returns information about the instrumentation
+	// scope that created the log.
+	InstrumentationScope() *instrumentation.Scope
+	// Attributes describe the aspects of the event.
+	Attributes() *[]attribute.KeyValue
+
+	// A private method to prevent users implementing the
+	// interface and so future additions to it will not
+	// violate compatibility.
+	private()
+}
+
+type ReadWriteLogRecord interface {
+	SetResource(resource *resource.Resource)
+	// RecordException message, stacktrace, type
+	RecordException(*string, *string, *string)
+	ReadableLogRecord
+}
+
+// exportableLogRecord is an implementation of the OpenTelemetry Log API
+// representing the individual component of a log.
+type exportableLogRecord struct {
+	// mu protects the contents of this log.
+	mu                   sync.Mutex
+	timestamp            *time.Time
+	observedTimestamp    time.Time
+	traceId              *trace.TraceID
+	spanId               *trace.SpanID
+	traceFlags           *trace.TraceFlags
+	severityText         *string
+	severityNumber       *logs.SeverityNumber
+	body                 *string
+	resource             *resource.Resource
+	instrumentationScope *instrumentation.Scope
+	attributes           *[]attribute.KeyValue
+}
+
+// newReadWriteLogRecord creates a ReadWriteLogRecord.
+// This method may change in the future.
+func newReadWriteLogRecord(
+	ctx *trace.SpanContext,
+	body *string,
+	severityText *string,
+	severityNumber *logs.SeverityNumber,
+	resource *resource.Resource,
+	instrumentationScope *instrumentation.Scope,
+	attributes *[]attribute.KeyValue,
+	timestamp *time.Time) ReadWriteLogRecord {
+
+	traceId := ctx.TraceID()
+	spanId := ctx.SpanID()
+	traceFlags := ctx.TraceFlags()
+
+	return &exportableLogRecord{
+		timestamp:            timestamp,
+		observedTimestamp:    time.Now(),
+		traceId:              &traceId,
+		spanId:               &spanId,
+		traceFlags:           &traceFlags,
+		severityText:         severityText,
+		severityNumber:       severityNumber,
+		body:                 body,
+		resource:             resource,
+		instrumentationScope: instrumentationScope,
+		attributes:           attributes,
+	}
+}
+
+func (r *exportableLogRecord) SetResource(resource *resource.Resource) { r.resource = resource }
+
+// RecordException helper to add Exception related information as attributes of Log Record
+// see https://opentelemetry.io/docs/specs/otel/logs/semantic_conventions/exceptions/#recording-an-exception
+func (r *exportableLogRecord) RecordException(message *string, stacktrace *string, exceptionType *string) {
+	if message == nil && exceptionType == nil {
+		// at least one of message or exceptionType must be present
+		return
+	}
+	if message != nil {
+		*r.attributes = append(*r.attributes, semconv.ExceptionMessage(*message))
+	}
+	if stacktrace != nil {
+		*r.attributes = append(*r.attributes, semconv.ExceptionStacktrace(*stacktrace))
+	}
+	if exceptionType != nil {
+		*r.attributes = append(*r.attributes, semconv.ExceptionType(*exceptionType))
+	}
+}
+
+func (r *exportableLogRecord) Timestamp() *time.Time        { return r.timestamp }
+func (r *exportableLogRecord) ObservedTimestamp() time.Time { return r.observedTimestamp }
+func (r *exportableLogRecord) TraceId() *trace.TraceID      { return r.traceId }
+
+func (r *exportableLogRecord) SpanId() *trace.SpanID         { return r.spanId }
+func (r *exportableLogRecord) TraceFlags() *trace.TraceFlags { return r.traceFlags }
+func (r *exportableLogRecord) InstrumentationScope() *instrumentation.Scope {
+	return r.instrumentationScope
+}
+func (r *exportableLogRecord) SeverityText() *string                { return r.severityText }
+func (r *exportableLogRecord) SeverityNumber() *logs.SeverityNumber { return r.severityNumber }
+func (r *exportableLogRecord) Body() *string                        { return r.body }
+func (r *exportableLogRecord) Resource() *resource.Resource         { return r.resource }
+func (r *exportableLogRecord) Attributes() *[]attribute.KeyValue    { return r.attributes }
+func (r *exportableLogRecord) private()                             {}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/provider.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/provider.go
new file mode 100644
index 0000000000000000000000000000000000000000..c0e41e7e28263a55ed57bab2dd43d38f8e36afe6
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/provider.go
@@ -0,0 +1,274 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logs // import "github.com/agoda-com/opentelemetry-logs-go/sdk/logs"
+
+import (
+	"context"
+	"fmt"
+	"github.com/agoda-com/opentelemetry-logs-go/internal/global"
+	"github.com/agoda-com/opentelemetry-logs-go/logs"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	"sync"
+	"sync/atomic"
+)
+
+const (
+	defaultLoggerName = "github.com/agoda-com/opentelemetry-logs-go/sdk/logs/provider"
+)
+
+// loggerProviderConfig Configuration for Logger Provider
+type loggerProviderConfig struct {
+	processors []LogRecordProcessor
+	// resource contains attributes representing an entity that produces telemetry.
+	resource *resource.Resource
+}
+
+// LoggerProviderOption configures a LoggerProvider.
+type LoggerProviderOption interface {
+	apply(loggerProviderConfig) loggerProviderConfig
+}
+type loggerProviderOptionFunc func(loggerProviderConfig) loggerProviderConfig
+
+func (fn loggerProviderOptionFunc) apply(cfg loggerProviderConfig) loggerProviderConfig {
+	return fn(cfg)
+}
+
+// WithLogRecordProcessor will configure processor to process logs
+func WithLogRecordProcessor(logsProcessor LogRecordProcessor) LoggerProviderOption {
+	return loggerProviderOptionFunc(func(cfg loggerProviderConfig) loggerProviderConfig {
+		cfg.processors = append(cfg.processors, logsProcessor)
+		return cfg
+	})
+}
+
+// WithSyncer registers the exporter with the LoggerProvider using a
+// SimpleLogRecordProcessor.
+//
+// This is not recommended for production use. The synchronous nature of the
+// SimpleLogRecordProcessor that will wrap the exporter makes it good for
+// testing, debugging, or showing examples of other features, but it will be
+// slow and have a high computational overhead. The WithBatcher option is
+// recommended for production use instead.
+func WithSyncer(e LogRecordExporter) LoggerProviderOption {
+	return WithLogRecordProcessor(NewSimpleLogRecordProcessor(e))
+}
+
+// WithBatcher registers the exporter with the LoggerProvider using a
+// BatchLogRecordProcessor configured with the passed opts.
+func WithBatcher(e LogRecordExporter, opts ...BatchLogRecordProcessorOption) LoggerProviderOption {
+	return WithLogRecordProcessor(NewBatchLogRecordProcessor(e, opts...))
+}
+
+// WithResource will configure the OTLP logger with common resource attributes.
+//
+// Parameters:
+// r (*resource.Resource) - resource attributes that will be added to every log record as resource-level tags
+func WithResource(r *resource.Resource) LoggerProviderOption {
+	return loggerProviderOptionFunc(func(cfg loggerProviderConfig) loggerProviderConfig {
+		var err error
+		cfg.resource, err = resource.Merge(resource.Environment(), r)
+		if err != nil {
+			otel.Handle(err)
+		}
+		return cfg
+	})
+}
+
+// LoggerProvider provides access to Loggers. The API is not intended to be called by application developers directly.
+// see https://opentelemetry.io/docs/specs/otel/logs/bridge-api/#loggerprovider
+type LoggerProvider struct {
+	mu          sync.Mutex
+	namedLogger map[instrumentation.Scope]*logger
+	//cfg loggerProviderConfig
+
+	logProcessors atomic.Pointer[logRecordProcessorStates]
+	isShutdown    atomic.Bool
+
+	// These fields are not protected by the lock mu. They are assumed to be
+	// immutable after creation of the LoggerProvider.
+	resource *resource.Resource
+}
+
+var _ logs.LoggerProvider = &LoggerProvider{}
+
+func (lp *LoggerProvider) Logger(name string, opts ...logs.LoggerOption) logs.Logger {
+
+	if lp.isShutdown.Load() {
+		return logs.NewNoopLoggerProvider().Logger(name, opts...)
+	}
+
+	c := logs.NewLoggerConfig(opts...)
+
+	if name == "" {
+		name = defaultLoggerName
+	}
+
+	is := instrumentation.Scope{
+		Name:      name,
+		Version:   c.InstrumentationVersion(),
+		SchemaURL: c.SchemaURL(),
+	}
+
+	t, ok := func() (logs.Logger, bool) {
+		lp.mu.Lock()
+		defer lp.mu.Unlock()
+		// Must check the flag after acquiring the mutex to avoid returning a valid logger if Shutdown() ran
+		// after the first check above but before we acquired the mutex.
+		if lp.isShutdown.Load() {
+			return logs.NewNoopLoggerProvider().Logger(name, opts...), true
+		}
+
+		t, ok := lp.namedLogger[is]
+		if !ok {
+			t = &logger{
+				provider:             lp,
+				instrumentationScope: is,
+			}
+		}
+		return t, ok
+	}()
+	if !ok {
+		// This code is outside the mutex to not hold the lock while calling third party logging code:
+		// - That code may do slow things like I/O, which would prolong the duration the lock is held,
+		//   slowing down all logging consumers.
+		// - Logging code may be instrumented with logging and deadlock because it could try
+		//   acquiring the same non-reentrant mutex.
+		global.Info("Logger created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL)
+	}
+	return t
+}
+
+func NewLoggerProvider(opts ...LoggerProviderOption) *LoggerProvider {
+	o := loggerProviderConfig{}
+
+	o = applyLoggerProviderEnvConfigs(o)
+
+	for _, opt := range opts {
+		o = opt.apply(o)
+	}
+
+	o = ensureValidLoggerProviderConfig(o)
+
+	lp := &LoggerProvider{
+		namedLogger: make(map[instrumentation.Scope]*logger),
+		resource:    o.resource,
+	}
+
+	global.Info("LoggerProvider created", "config", o)
+
+	lrpss := make(logRecordProcessorStates, 0, len(o.processors))
+	for _, lrp := range o.processors {
+		lrpss = append(lrpss, newLogsProcessorState(lrp))
+	}
+	lp.logProcessors.Store(&lrpss)
+
+	return lp
+}
+
+func (p *LoggerProvider) getLogRecordProcessorStates() logRecordProcessorStates {
+	return *(p.logProcessors.Load())
+}
+
+func (p *LoggerProvider) Shutdown(ctx context.Context) error {
+	// This check prevents deadlocks in case of recursive shutdown.
+	if p.isShutdown.Load() {
+		return nil
+	}
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	// This check prevents calls after a shutdown has already been done concurrently.
+	if !p.isShutdown.CompareAndSwap(false, true) { // did toggle?
+		return nil
+	}
+
+	var retErr error
+	for _, lrps := range p.getLogRecordProcessorStates() {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		var err error
+		lrps.state.Do(func() {
+			err = lrps.lp.Shutdown(ctx)
+		})
+		if err != nil {
+			if retErr == nil {
+				retErr = err
+			} else {
+				// Poor man's list of errors
+				retErr = fmt.Errorf("%v; %v", retErr, err)
+			}
+		}
+	}
+	p.logProcessors.Store(&logRecordProcessorStates{})
+	return retErr
+}
+
+// ForceFlush immediately exports all logs that have not yet been exported for
+// all the registered log processors.
+func (p *LoggerProvider) ForceFlush(ctx context.Context) error {
+	lrpss := p.getLogRecordProcessorStates()
+	if len(lrpss) == 0 {
+		return nil
+	}
+
+	for _, lrps := range lrpss {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		if err := lrps.lp.ForceFlush(ctx); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func applyLoggerProviderEnvConfigs(cfg loggerProviderConfig) loggerProviderConfig {
+	for _, opt := range loggerProviderOptionsFromEnv() {
+		cfg = opt.apply(cfg)
+	}
+
+	return cfg
+}
+
+func loggerProviderOptionsFromEnv() []LoggerProviderOption {
+	var opts []LoggerProviderOption
+
+	return opts
+}
+
+// ensureValidLoggerProviderConfig ensures that given LoggerProviderConfig is valid.
+func ensureValidLoggerProviderConfig(cfg loggerProviderConfig) loggerProviderConfig {
+
+	if cfg.resource == nil {
+		cfg.resource = resource.Default()
+	}
+
+	return cfg
+}
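Reviewer note: typical wiring of the provider options defined above. The exporter parameter and the service name are placeholders; `resource.NewWithAttributes` and `semconv.ServiceName` come from the upstream OTel SDK and are assumed to be available at the versions pinned in this diff.

```go
package logsdemo

import (
	"context"

	sdk "github.com/agoda-com/opentelemetry-logs-go/sdk/logs"
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
)

// run builds a provider with a batching processor and a merged resource,
// then flushes and shuts it down. Any LogRecordExporter works here.
func run(ctx context.Context, exp sdk.LogRecordExporter) error {
	res := resource.NewWithAttributes(semconv.SchemaURL,
		semconv.ServiceName("flow-collector"), // hypothetical service name
	)
	lp := sdk.NewLoggerProvider(
		sdk.WithBatcher(exp),  // batch processor, recommended for production
		sdk.WithResource(res), // merged into every record's resource
	)
	defer func() { _ = lp.Shutdown(ctx) }()

	logger := lp.Logger("example") // returns a noop logger after Shutdown
	_ = logger
	return lp.ForceFlush(ctx)
}
```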
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/simple_log_record_processor.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/simple_log_record_processor.go
new file mode 100644
index 0000000000000000000000000000000000000000..453da09e181e3da7920c7748302e60651af70ca2
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/simple_log_record_processor.go
@@ -0,0 +1,78 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logs
+
+import (
+	"context"
+	"go.opentelemetry.io/otel"
+	"log"
+	"sync"
+)
+
+type simpleLogRecordProcessor struct {
+	exporterMu sync.Mutex
+	stopOnce   sync.Once
+	exporter   LogRecordExporter
+}
+
+func (lrp *simpleLogRecordProcessor) Shutdown(ctx context.Context) error {
+	return nil
+}
+
+func (lrp *simpleLogRecordProcessor) ForceFlush(ctx context.Context) error {
+	return nil
+}
+
+var _ LogRecordProcessor = (*simpleLogRecordProcessor)(nil)
+
+// NewSimpleLogRecordProcessor returns a new LogRecordProcessor that will synchronously
+// send completed logs to the exporter immediately.
+//
+// This LogRecordProcessor is not recommended for production use. The synchronous
+// nature of this LogRecordProcessor makes it good for testing, debugging, or
+// showing examples of other features, but it will be slow and have a high
+// computational overhead. The BatchLogRecordProcessor is recommended
+// for production use instead.
+func NewSimpleLogRecordProcessor(exporter LogRecordExporter) LogRecordProcessor {
+	slp := &simpleLogRecordProcessor{
+		exporter: exporter,
+	}
+	log.Printf("SimpleLogsProcessor is not recommended for production use, consider using BatchLogRecordProcessor instead.")
+
+	return slp
+}
+
+// OnEmit immediately exports a LogRecord
+func (lrp *simpleLogRecordProcessor) OnEmit(rol ReadableLogRecord) {
+	lrp.exporterMu.Lock()
+	defer lrp.exporterMu.Unlock()
+
+	if err := lrp.exporter.Export(context.Background(), []ReadableLogRecord{rol}); err != nil {
+		otel.Handle(err)
+	}
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this LogRecord Processor.
+func (lrp *simpleLogRecordProcessor) MarshalLog() interface{} {
+	return struct {
+		Type     string
+		Exporter LogRecordExporter
+	}{
+		Type:     "SimpleLogRecordProcessor",
+		Exporter: lrp.exporter,
+	}
+}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/semconv/attribute_group.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/semconv/attribute_group.go
new file mode 100644
index 0000000000000000000000000000000000000000..4f7215d711f62c9366fe5ad108c7fc328f2798d9
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/semconv/attribute_group.go
@@ -0,0 +1,70 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package semconv
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Describes Log Record attributes.
+// see also https://opentelemetry.io/docs/specs/otel/logs/semantic_conventions/exceptions/#attributes
+const (
+	// ExceptionMessageKey is the attribute Key conforming to the "exception.message"
+	// semantic conventions. It represents the exception message.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	ExceptionMessageKey = attribute.Key("exception.message")
+
+	// ExceptionStacktraceKey is the attribute Key conforming to the "exception.stacktrace"
+	// semantic conventions. It represents the stack trace of the exception.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+	// ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+	// semantic conventions. It represents the type of the exception.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message
+// Examples: Division by zero; Can't convert 'int' object to str implicitly
+func ExceptionMessage(val string) attribute.KeyValue {
+	return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents the exception
+// stacktrace
+// Examples: Exception in thread "main" java.lang.RuntimeException: ...
+func ExceptionStacktrace(val string) attribute.KeyValue {
+	return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the exception type
+// Examples: java.net.ConnectException; OSError
+func ExceptionType(val string) attribute.KeyValue {
+	return ExceptionTypeKey.String(val)
+}
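Reviewer note: a small sketch using the helpers above to build exception attributes for a log record, per the semantic conventions linked in the file. The "OSError" type string is just an example value.

```go
package logsdemo

import (
	"github.com/agoda-com/opentelemetry-logs-go/semconv"
	"go.opentelemetry.io/otel/attribute"
)

// exceptionAttrs converts an error and a captured stack into the
// exception.* attributes defined by the semantic conventions.
func exceptionAttrs(err error, stack string) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.ExceptionType("OSError"), // example type string
		semconv.ExceptionMessage(err.Error()),
		semconv.ExceptionStacktrace(stack),
	}
}
```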
diff --git a/vendor/github.com/cenkalti/backoff/v4/.gitignore b/vendor/github.com/cenkalti/backoff/v4/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..50d95c548b67069b52ea4dcf91bf85314d488d41
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+# IDEs
+.idea/
diff --git a/vendor/github.com/cenkalti/backoff/v4/LICENSE b/vendor/github.com/cenkalti/backoff/v4/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..89b8179965581339743b5edd9e18c1fcc778b56d
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Cenk Altı
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..16abdfc084f73cb6658c2ddf463ac02ff666dd6e
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/README.md
@@ -0,0 +1,32 @@
+# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls]
+
+This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
+
+[Exponential backoff][exponential backoff wiki]
+is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
+in order to gradually find an acceptable rate.
+The retries exponentially increase and stop increasing when a certain threshold is met.
+
+## Usage
+
+Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.
+
+Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
+
+## Contributing
+
+* I would like to keep this library as small as possible.
+* Please don't send a PR without opening an issue and discussing it first.
+* If the proposed change is not a common use case, I will probably not accept it.
+
+[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
+[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
+[travis]: https://travis-ci.org/cenkalti/backoff
+[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master
+[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
+[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
+
+[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
+[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
+
+[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v4/backoff.go
new file mode 100644
index 0000000000000000000000000000000000000000..3676ee405d87b3dc7b675f3c93f719a2abca12d2
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/backoff.go
@@ -0,0 +1,66 @@
+// Package backoff implements backoff algorithms for retrying operations.
+//
+// Use Retry function for retrying operations that may fail.
+// If Retry does not meet your needs,
+// copy/paste the function into your project and modify as you wish.
+//
+// There is also Ticker type similar to time.Ticker.
+// You can use it if you need to work with channels.
+//
+// See Examples section below for usage examples.
+package backoff
+
+import "time"
+
+// BackOff is a backoff policy for retrying an operation.
+type BackOff interface {
+	// NextBackOff returns the duration to wait before retrying the operation,
+	// or backoff.Stop to indicate that no more retries should be made.
+	//
+	// Example usage:
+	//
+	// 	duration := backoff.NextBackOff();
+	// 	if (duration == backoff.Stop) {
+	// 		// Do not retry operation.
+	// 	} else {
+	// 		// Sleep for duration and retry operation.
+	// 	}
+	//
+	NextBackOff() time.Duration
+
+	// Reset to initial state.
+	Reset()
+}
+
+// Stop is a sentinel value returned by NextBackOff() to indicate that no more retries should be made.
+const Stop time.Duration = -1
+
+// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
+// meaning that the operation is retried immediately without waiting, indefinitely.
+type ZeroBackOff struct{}
+
+func (b *ZeroBackOff) Reset() {}
+
+func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }
+
+// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
+// NextBackOff(), meaning that the operation should never be retried.
+type StopBackOff struct{}
+
+func (b *StopBackOff) Reset() {}
+
+func (b *StopBackOff) NextBackOff() time.Duration { return Stop }
+
+// ConstantBackOff is a backoff policy that always returns the same backoff delay.
+// This is in contrast to an exponential backoff policy,
+// which returns a delay that grows longer as you call NextBackOff() over and over again.
+type ConstantBackOff struct {
+	Interval time.Duration
+}
+
+func (b *ConstantBackOff) Reset()                     {}
+func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval }
+
+func NewConstantBackOff(d time.Duration) *ConstantBackOff {
+	return &ConstantBackOff{Interval: d}
+}
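Reviewer note: a sketch of driving the BackOff interface by hand with the fixed policies defined above. The Retry helper defined later in this package wraps exactly this pattern; the loop here is illustrative, not the library's implementation.

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

// callWithBackoff retries op until it succeeds or the policy returns Stop.
func callWithBackoff(op func() error, b backoff.BackOff) error {
	b.Reset()
	for {
		err := op()
		if err == nil {
			return nil
		}
		next := b.NextBackOff()
		if next == backoff.Stop {
			return err
		}
		time.Sleep(next)
	}
}

func main() {
	attempts := 0
	err := callWithBackoff(func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient")
		}
		return nil
	}, backoff.NewConstantBackOff(100*time.Millisecond))
	fmt.Println(attempts, err) // 3 <nil>
}
```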
diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go
new file mode 100644
index 0000000000000000000000000000000000000000..48482330eb763651b2217b2704de5903b155d8d2
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/context.go
@@ -0,0 +1,62 @@
+package backoff
+
+import (
+	"context"
+	"time"
+)
+
+// BackOffContext is a backoff policy that stops retrying after the context
+// is canceled.
+type BackOffContext interface { // nolint: golint
+	BackOff
+	Context() context.Context
+}
+
+type backOffContext struct {
+	BackOff
+	ctx context.Context
+}
+
+// WithContext returns a BackOffContext with context ctx
+//
+// ctx must not be nil
+func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
+	if ctx == nil {
+		panic("nil context")
+	}
+
+	if b, ok := b.(*backOffContext); ok {
+		return &backOffContext{
+			BackOff: b.BackOff,
+			ctx:     ctx,
+		}
+	}
+
+	return &backOffContext{
+		BackOff: b,
+		ctx:     ctx,
+	}
+}
+
+func getContext(b BackOff) context.Context {
+	if cb, ok := b.(BackOffContext); ok {
+		return cb.Context()
+	}
+	if tb, ok := b.(*backOffTries); ok {
+		return getContext(tb.delegate)
+	}
+	return context.Background()
+}
+
+func (b *backOffContext) Context() context.Context {
+	return b.ctx
+}
+
+func (b *backOffContext) NextBackOff() time.Duration {
+	select {
+	case <-b.ctx.Done():
+		return Stop
+	default:
+		return b.BackOff.NextBackOff()
+	}
+}
diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go
new file mode 100644
index 0000000000000000000000000000000000000000..2c56c1e7189ba605ae668e752b3781f815b1bbd1
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go
@@ -0,0 +1,161 @@
+package backoff
+
+import (
+	"math/rand"
+	"time"
+)
+
+/*
+ExponentialBackOff is a backoff implementation that increases the backoff
+period for each retry attempt using a randomization function that grows exponentially.
+
+NextBackOff() is calculated using the following formula:
+
+ randomized interval =
+     RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
+
+In other words NextBackOff() will range between the randomization factor
+percentage below and above the retry interval.
+
+For example, given the following parameters:
+
+ RetryInterval = 2
+ RandomizationFactor = 0.5
+ Multiplier = 2
+
+the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
+multiplied by the exponential, that is, between 2 and 6 seconds.
+
+Note: MaxInterval caps the RetryInterval and not the randomized interval.
+
+If the time elapsed since an ExponentialBackOff instance is created goes past the
+MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
+
+The elapsed time can be reset by calling Reset().
+
+Example: Given the following default arguments, for 10 tries the sequence will be,
+and assuming we go over the MaxElapsedTime on the 10th try:
+
+ Request #  RetryInterval (seconds)  Randomized Interval (seconds)
+
+  1          0.5                     [0.25,   0.75]
+  2          0.75                    [0.375,  1.125]
+  3          1.125                   [0.562,  1.687]
+  4          1.687                   [0.8435, 2.53]
+  5          2.53                    [1.265,  3.795]
+  6          3.795                   [1.897,  5.692]
+  7          5.692                   [2.846,  8.538]
+  8          8.538                   [4.269, 12.807]
+  9         12.807                   [6.403, 19.210]
+ 10         19.210                   backoff.Stop
+
+Note: Implementation is not thread-safe.
+*/
+type ExponentialBackOff struct {
+	InitialInterval     time.Duration
+	RandomizationFactor float64
+	Multiplier          float64
+	MaxInterval         time.Duration
+	// After MaxElapsedTime the ExponentialBackOff returns Stop.
+	// It never stops if MaxElapsedTime == 0.
+	MaxElapsedTime time.Duration
+	Stop           time.Duration
+	Clock          Clock
+
+	currentInterval time.Duration
+	startTime       time.Time
+}
+
+// Clock is an interface that returns current time for BackOff.
+type Clock interface {
+	Now() time.Time
+}
+
+// Default values for ExponentialBackOff.
+const (
+	DefaultInitialInterval     = 500 * time.Millisecond
+	DefaultRandomizationFactor = 0.5
+	DefaultMultiplier          = 1.5
+	DefaultMaxInterval         = 60 * time.Second
+	DefaultMaxElapsedTime      = 15 * time.Minute
+)
+
+// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
+func NewExponentialBackOff() *ExponentialBackOff {
+	b := &ExponentialBackOff{
+		InitialInterval:     DefaultInitialInterval,
+		RandomizationFactor: DefaultRandomizationFactor,
+		Multiplier:          DefaultMultiplier,
+		MaxInterval:         DefaultMaxInterval,
+		MaxElapsedTime:      DefaultMaxElapsedTime,
+		Stop:                Stop,
+		Clock:               SystemClock,
+	}
+	b.Reset()
+	return b
+}
+
+type systemClock struct{}
+
+func (t systemClock) Now() time.Time {
+	return time.Now()
+}
+
+// SystemClock implements Clock interface that uses time.Now().
+var SystemClock = systemClock{}
+
+// Reset resets the interval back to the initial retry interval and restarts the timer.
+// Reset must be called before using b.
+func (b *ExponentialBackOff) Reset() {
+	b.currentInterval = b.InitialInterval
+	b.startTime = b.Clock.Now()
+}
+
+// NextBackOff calculates the next backoff interval using the formula:
+// 	Randomized interval = RetryInterval * (1 ± RandomizationFactor)
+func (b *ExponentialBackOff) NextBackOff() time.Duration {
+	// Make sure we have not gone over the maximum elapsed time.
+	elapsed := b.GetElapsedTime()
+	next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
+	b.incrementCurrentInterval()
+	if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
+		return b.Stop
+	}
+	return next
+}
+
+// GetElapsedTime returns the elapsed time since the ExponentialBackOff instance
+// was created; it is reset when Reset() is called.
+//
+// The elapsed time is computed using the configured Clock. It is
+// safe to call even while the backoff policy is used by a running
+// ticker.
+func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
+	return b.Clock.Now().Sub(b.startTime)
+}
+
+// Increments the current interval by multiplying it with the multiplier.
+func (b *ExponentialBackOff) incrementCurrentInterval() {
+	// Check for overflow, if overflow is detected set the current interval to the max interval.
+	if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
+		b.currentInterval = b.MaxInterval
+	} else {
+		b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
+	}
+}
+
+// Returns a random value from the following interval:
+// 	[currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
+func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
+	if randomizationFactor == 0 {
+		return currentInterval // make sure no randomness is used when randomizationFactor is 0.
+	}
+	var delta = randomizationFactor * float64(currentInterval)
+	var minInterval = float64(currentInterval) - delta
+	var maxInterval = float64(currentInterval) + delta
+
+	// Get a random value from the range [minInterval, maxInterval].
+	// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
+	// we want a 33% chance for selecting either 1, 2 or 3.
+	return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
+}
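Reviewer note: a quick sketch observing the randomized interval growth described in the table above. Output varies run to run because of RandomizationFactor; the tweaked field values are arbitrary.

```go
package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	b := backoff.NewExponentialBackOff()
	b.InitialInterval = 500 * time.Millisecond
	b.MaxInterval = 10 * time.Second
	b.MaxElapsedTime = 0 // never return Stop based on elapsed time
	b.Reset()            // Reset must be called before use

	for i := 0; i < 5; i++ {
		// Grows by ~Multiplier each call, with ±50% jitter by default.
		fmt.Println(b.NextBackOff())
	}
}
```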
diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go
new file mode 100644
index 0000000000000000000000000000000000000000..b9c0c51cd755f543a9596384e5b8668651204f5c
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/retry.go
@@ -0,0 +1,146 @@
+package backoff
+
+import (
+	"errors"
+	"time"
+)
+
+// An OperationWithData is executed by RetryWithData() or RetryNotifyWithData().
+// The operation will be retried using a backoff policy if it returns an error.
+type OperationWithData[T any] func() (T, error)
+
+// An Operation is executed by Retry() or RetryNotify().
+// The operation will be retried using a backoff policy if it returns an error.
+type Operation func() error
+
+func (o Operation) withEmptyData() OperationWithData[struct{}] {
+	return func() (struct{}, error) {
+		return struct{}{}, o()
+	}
+}
+
+// Notify is a notify-on-error function. It receives an operation error and
+// backoff delay if the operation failed (with an error).
+//
+// NOTE that if the backoff policy states to stop retrying,
+// the notify function isn't called.
+type Notify func(error, time.Duration)
+
+// Retry the operation o until it does not return error or BackOff stops.
+// o is guaranteed to be run at least once.
+//
+// If o returns a *PermanentError, the operation is not retried, and the
+// wrapped error is returned.
+//
+// Retry sleeps the goroutine for the duration returned by BackOff after a
+// failed operation returns.
+func Retry(o Operation, b BackOff) error {
+	return RetryNotify(o, b, nil)
+}
+
+// RetryWithData is like Retry but returns data in the response too.
+func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
+	return RetryNotifyWithData(o, b, nil)
+}
+
+// RetryNotify calls notify function with the error and wait duration
+// for each failed attempt before sleep.
+func RetryNotify(operation Operation, b BackOff, notify Notify) error {
+	return RetryNotifyWithTimer(operation, b, notify, nil)
+}
+
+// RetryNotifyWithData is like RetryNotify but returns data in the response too.
+func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) {
+	return doRetryNotify(operation, b, notify, nil)
+}
+
+// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
+// for each failed attempt before sleep.
+// A default timer that uses system timer is used when nil is passed.
+func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
+	_, err := doRetryNotify(operation.withEmptyData(), b, notify, t)
+	return err
+}
+
+// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too.
+func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
+	return doRetryNotify(operation, b, notify, t)
+}
+
+func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
+	var (
+		err  error
+		next time.Duration
+		res  T
+	)
+	if t == nil {
+		t = &defaultTimer{}
+	}
+
+	defer func() {
+		t.Stop()
+	}()
+
+	ctx := getContext(b)
+
+	b.Reset()
+	for {
+		res, err = operation()
+		if err == nil {
+			return res, nil
+		}
+
+		var permanent *PermanentError
+		if errors.As(err, &permanent) {
+			return res, permanent.Err
+		}
+
+		if next = b.NextBackOff(); next == Stop {
+			if cerr := ctx.Err(); cerr != nil {
+				return res, cerr
+			}
+
+			return res, err
+		}
+
+		if notify != nil {
+			notify(err, next)
+		}
+
+		t.Start(next)
+
+		select {
+		case <-ctx.Done():
+			return res, ctx.Err()
+		case <-t.C():
+		}
+	}
+}
+
+// PermanentError signals that the operation should not be retried.
+type PermanentError struct {
+	Err error
+}
+
+func (e *PermanentError) Error() string {
+	return e.Err.Error()
+}
+
+func (e *PermanentError) Unwrap() error {
+	return e.Err
+}
+
+func (e *PermanentError) Is(target error) bool {
+	_, ok := target.(*PermanentError)
+	return ok
+}
+
+// Permanent wraps the given err in a *PermanentError.
+func Permanent(err error) error {
+	if err == nil {
+		return nil
+	}
+	return &PermanentError{
+		Err: err,
+	}
+}
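Reviewer note: a sketch of the Retry/Permanent API above. Transient errors are retried under the policy; a *PermanentError aborts immediately and the wrapped error is returned, as documented on Retry.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

var errNotFound = errors.New("not found")

// fetch simulates a 4xx-style failure that should not be retried.
func fetch() error {
	return backoff.Permanent(errNotFound)
}

func main() {
	err := backoff.Retry(fetch, backoff.NewExponentialBackOff())
	// Retry unwraps the PermanentError, so the original error comes back.
	fmt.Println(errors.Is(err, errNotFound)) // true
}
```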
diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v4/ticker.go
new file mode 100644
index 0000000000000000000000000000000000000000..df9d68bce527f2952dd8a6b4569fff26b6c214c2
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/ticker.go
@@ -0,0 +1,97 @@
+package backoff
+
+import (
+	"context"
+	"sync"
+	"time"
+)
+
+// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
+//
+// Ticks will continue to arrive when the previous operation is still running,
+// so operations that take a while to fail could run in quick succession.
+type Ticker struct {
+	C        <-chan time.Time
+	c        chan time.Time
+	b        BackOff
+	ctx      context.Context
+	timer    Timer
+	stop     chan struct{}
+	stopOnce sync.Once
+}
+
+// NewTicker returns a new Ticker containing a channel that will send
+// the time at times specified by the BackOff argument. Ticker is
+// guaranteed to tick at least once.  The channel is closed when Stop
+// method is called or BackOff stops. It is not safe to manipulate the
+// provided backoff policy (notably calling NextBackOff or Reset)
+// while the ticker is running.
+func NewTicker(b BackOff) *Ticker {
+	return NewTickerWithTimer(b, &defaultTimer{})
+}
+
+// NewTickerWithTimer returns a new Ticker with a custom timer.
+// A default timer that uses system timer is used when nil is passed.
+func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
+	if timer == nil {
+		timer = &defaultTimer{}
+	}
+	c := make(chan time.Time)
+	t := &Ticker{
+		C:     c,
+		c:     c,
+		b:     b,
+		ctx:   getContext(b),
+		timer: timer,
+		stop:  make(chan struct{}),
+	}
+	t.b.Reset()
+	go t.run()
+	return t
+}
+
+// Stop turns off a ticker. After Stop, no more ticks will be sent.
+func (t *Ticker) Stop() {
+	t.stopOnce.Do(func() { close(t.stop) })
+}
+
+func (t *Ticker) run() {
+	c := t.c
+	defer close(c)
+
+	// Ticker is guaranteed to tick at least once.
+	afterC := t.send(time.Now())
+
+	for {
+		if afterC == nil {
+			return
+		}
+
+		select {
+		case tick := <-afterC:
+			afterC = t.send(tick)
+		case <-t.stop:
+			t.c = nil // Prevent future ticks from being sent to the channel.
+			return
+		case <-t.ctx.Done():
+			return
+		}
+	}
+}
+
+func (t *Ticker) send(tick time.Time) <-chan time.Time {
+	select {
+	case t.c <- tick:
+	case <-t.stop:
+		return nil
+	}
+
+	next := t.b.NextBackOff()
+	if next == Stop {
+		t.Stop()
+		return nil
+	}
+
+	t.timer.Start(next)
+	return t.timer.C()
+}
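Reviewer note: a sketch of consuming the Ticker defined above. Each received tick triggers one attempt; the channel closes when the policy returns Stop. WithMaxRetries (defined in tries.go below) bounds the example so it terminates.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func operation() error { return errors.New("still failing") }

func main() {
	policy := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 5)
	ticker := backoff.NewTicker(policy)
	defer ticker.Stop()

	var err error
	// The loop exits when operation succeeds or the channel closes on Stop.
	for range ticker.C {
		if err = operation(); err == nil {
			ticker.Stop()
			break
		}
	}
	fmt.Println(err) // last error, or nil on success
}
```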
diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v4/timer.go
new file mode 100644
index 0000000000000000000000000000000000000000..8120d0213c58d550ea7cf20ab4d2329134abb12a
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/timer.go
@@ -0,0 +1,35 @@
+package backoff
+
+import "time"
+
+type Timer interface {
+	Start(duration time.Duration)
+	Stop()
+	C() <-chan time.Time
+}
+
+// defaultTimer implements Timer interface using time.Timer
+type defaultTimer struct {
+	timer *time.Timer
+}
+
+// C returns the timers channel which receives the current time when the timer fires.
+func (t *defaultTimer) C() <-chan time.Time {
+	return t.timer.C
+}
+
+// Start starts the timer to fire after the given duration
+func (t *defaultTimer) Start(duration time.Duration) {
+	if t.timer == nil {
+		t.timer = time.NewTimer(duration)
+	} else {
+		t.timer.Reset(duration)
+	}
+}
+
+// Stop is called when the timer is not used anymore and resources may be freed.
+func (t *defaultTimer) Stop() {
+	if t.timer != nil {
+		t.timer.Stop()
+	}
+}
diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go
new file mode 100644
index 0000000000000000000000000000000000000000..28d58ca37c684020e7db4e1f5a1394cc4b4edef9
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/tries.go
@@ -0,0 +1,38 @@
+package backoff
+
+import "time"
+
+/*
+WithMaxRetries creates a wrapper around another BackOff, which will
+return Stop if NextBackOff() has been called too many times since
+the last time Reset() was called
+
+Note: Implementation is not thread-safe.
+*/
+func WithMaxRetries(b BackOff, max uint64) BackOff {
+	return &backOffTries{delegate: b, maxTries: max}
+}
+
+type backOffTries struct {
+	delegate BackOff
+	maxTries uint64
+	numTries uint64
+}
+
+func (b *backOffTries) NextBackOff() time.Duration {
+	if b.maxTries == 0 {
+		return Stop
+	}
+	if b.maxTries > 0 {
+		if b.maxTries <= b.numTries {
+			return Stop
+		}
+		b.numTries++
+	}
+	return b.delegate.NextBackOff()
+}
+
+func (b *backOffTries) Reset() {
+	b.numTries = 0
+	b.delegate.Reset()
+}
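Reviewer note: a sketch of bounding the retry count with the WithMaxRetries wrapper above, combined with RetryNotify from retry.go so each wait is reported before sleeping.

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	// One initial attempt plus at most three retries, 50ms apart.
	policy := backoff.WithMaxRetries(backoff.NewConstantBackOff(50*time.Millisecond), 3)
	notify := func(err error, next time.Duration) {
		fmt.Printf("retrying in %s after error: %v\n", next, err)
	}
	err := backoff.RetryNotify(func() error {
		return errors.New("transient failure")
	}, policy, notify)
	fmt.Println(err) // the last error, once retries are exhausted
}
```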
diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md
index a8c29bfbd53053708c3da3d3ff6070d1d1400530..8969526a6e5de9a663ccfce593b665687573b9bf 100644
--- a/vendor/github.com/go-logr/logr/README.md
+++ b/vendor/github.com/go-logr/logr/README.md
@@ -91,11 +91,12 @@ logr design but also left out some parts and changed others:
 | Adding a name to a logger | `WithName` | no API |
 | Modify verbosity of log entries in a call chain | `V` | no API |
 | Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
+| Pass context for extracting additional values | no API | API variants like `InfoCtx` |
 
 The high-level slog API is explicitly meant to be one of many different APIs
 that can be layered on top of a shared `slog.Handler`. logr is one such
-alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr)
-package.
+alternative API, with [interoperability](#slog-interoperability) provided by
+some conversion functions.
 
 ### Inspiration
 
@@ -145,24 +146,24 @@ There are implementations for the following logging libraries:
 ## slog interoperability
 
 Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
-and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and
-`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`.
+and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and
+`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`.
 As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
-slog API. `slogr` itself leaves that to the caller.
+slog API.
 
-## Using a `logr.Sink` as backend for slog
+### Using a `logr.LogSink` as backend for slog
 
 Ideally, a logr sink implementation should support both logr and slog by
-implementing both the normal logr interface(s) and `slogr.SlogSink`.  Because
+implementing both the normal logr interface(s) and `SlogSink`.  Because
 of a conflict in the parameters of the common `Enabled` method, it is [not
 possible to implement both slog.Handler and logr.Sink in the same
 type](https://github.com/golang/go/issues/59110).
 
 If both are supported, log calls can go from the high-level APIs to the backend
-without the need to convert parameters. `NewLogr` and `NewSlogHandler` can
+without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can
 convert back and forth without adding additional wrappers, with one exception:
 when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
-`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future
+`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future
 log calls.
 
 Such an implementation should also support values that implement specific
@@ -187,13 +188,13 @@ Not supporting slog has several drawbacks:
 These drawbacks are severe enough that applications using a mixture of slog and
 logr should switch to a different backend.
 
-## Using a `slog.Handler` as backend for logr
+### Using a `slog.Handler` as backend for logr
 
 Using a plain `slog.Handler` without support for logr works better than the
 other direction:
 - All logr verbosity levels can be mapped 1:1 to their corresponding slog level
   by negating them.
-- Stack unwinding is done by the `slogr.SlogSink` and the resulting program
+- Stack unwinding is done by the `SlogSink` and the resulting program
   counter is passed to the `slog.Handler`.
 - Names added via `Logger.WithName` are gathered and recorded in an additional
   attribute with `logger` as key and the names separated by slash as value.
@@ -205,27 +206,39 @@ ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility
 with logr implementations without slog support is not important, then
 `slog.Valuer` is sufficient.
 
-## Context support for slog
+### Context support for slog
 
 Storing a logger in a `context.Context` is not supported by
-slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this
-to fill this gap:
-
-    func HandlerFromContext(ctx context.Context) slog.Handler {
-        logger, err := logr.FromContext(ctx)
-        if err == nil {
-            return slogr.NewSlogHandler(logger)
-        }
-        return slog.Default().Handler()
-    }
-
-    func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context {
-        return logr.NewContext(ctx, slogr.NewLogr(handler))
-    }
-
-The downside is that storing and retrieving a `slog.Handler` needs more
-allocations compared to using a `logr.Logger`. Therefore the recommendation is
-to use the `logr.Logger` API in code which uses contextual logging.
+slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be
+used to fill this gap. They store and retrieve a `slog.Logger` pointer
+under the same context key that is also used by `NewContext` and
+`FromContext` for `logr.Logger` value.
+
+When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will
+automatically convert the `slog.Logger` to a
+`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction.
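+
+For example:
+
+    ctx := logr.NewContextWithSlogLogger(context.Background(), slog.Default())
+
+    // Retrieving the logger as a logr.Logger converts automatically.
+    if logger, err := logr.FromContext(ctx); err == nil {
+        logger.Info("via logr")
+    }
+
+    // Retrieving it as a *slog.Logger returns the stored pointer as-is.
+    if logger := logr.FromContextAsSlogLogger(ctx); logger != nil {
+        logger.Info("via slog")
+    }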
+
+With this approach, binaries which use either slog or logr are as efficient as
+possible with no unnecessary allocations. This is also why the API stores a
+`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger`
+on retrieval would need to allocate one.
+
+The downside is that switching back and forth needs more allocations. Because
+logr is the API that is already in use by different packages, in particular
+Kubernetes, the recommendation is to use the `logr.Logger` API in code which
+uses contextual logging.
+
+An alternative to adding values to a logger and storing that logger in the
+context is to store the values in the context and to configure a logging
+backend to extract those values when emitting log entries. This only works when
+log calls are passed the context, which is not supported by the logr API.
+
+With the slog API, passing the context to each log call is possible but not
+required. https://github.com/veqryn/slog-context is a package for slog which
+provides additional support code for this approach. It also contains wrappers
+for the context functions in logr, so developers who prefer to not use the logr
+APIs directly can use those instead and the resulting code will still be
+interoperable with logr.
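+
+A minimal hand-rolled sketch of that approach (`requestIDKey` and
+`ctxHandler` are hypothetical names, not part of logr or slog-context):
+
+    type requestIDKey struct{}
+
+    // ctxHandler wraps another slog.Handler and adds a request ID
+    // attribute whenever one is stored in the context.
+    type ctxHandler struct{ slog.Handler }
+
+    func (h ctxHandler) Handle(ctx context.Context, r slog.Record) error {
+        if id, ok := ctx.Value(requestIDKey{}).(string); ok {
+            r.AddAttrs(slog.String("requestID", id))
+        }
+        return h.Handler.Handle(ctx, r)
+    }
+
+    func (h ctxHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
+        return ctxHandler{h.Handler.WithAttrs(attrs)}
+    }
+
+    func (h ctxHandler) WithGroup(name string) slog.Handler {
+        return ctxHandler{h.Handler.WithGroup(name)}
+    }
+
+The extra attribute only appears for log calls which receive the context,
+such as `slog.InfoContext(ctx, ...)`.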
 
 ## FAQ
 
diff --git a/vendor/github.com/go-logr/logr/context.go b/vendor/github.com/go-logr/logr/context.go
new file mode 100644
index 0000000000000000000000000000000000000000..de8bcc3ad892608b2f0c1f0c700965d1578552b0
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/context.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+// contextKey is how we find Loggers in a context.Context. With Go < 1.21,
+// the value is always a Logger value. With Go >= 1.21, the value can be a
+// Logger value or a slog.Logger pointer.
+type contextKey struct{}
+
+// notFoundError exists to carry an IsNotFound method.
+type notFoundError struct{}
+
+func (notFoundError) Error() string {
+	return "no logr.Logger was present"
+}
+
+func (notFoundError) IsNotFound() bool {
+	return true
+}
diff --git a/vendor/github.com/go-logr/logr/context_noslog.go b/vendor/github.com/go-logr/logr/context_noslog.go
new file mode 100644
index 0000000000000000000000000000000000000000..f012f9a18e8b90d4dce7364607e3b51befa39359
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/context_noslog.go
@@ -0,0 +1,49 @@
+//go:build !go1.21
+// +build !go1.21
+
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+	"context"
+)
+
+// FromContext returns a Logger from ctx or an error if no Logger is found.
+func FromContext(ctx context.Context) (Logger, error) {
+	if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+		return v, nil
+	}
+
+	return Logger{}, notFoundError{}
+}
+
+// FromContextOrDiscard returns a Logger from ctx.  If no Logger is found, this
+// returns a Logger that discards all log messages.
+func FromContextOrDiscard(ctx context.Context) Logger {
+	if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+		return v
+	}
+
+	return Discard()
+}
+
+// NewContext returns a new Context, derived from ctx, which carries the
+// provided Logger.
+func NewContext(ctx context.Context, logger Logger) context.Context {
+	return context.WithValue(ctx, contextKey{}, logger)
+}
diff --git a/vendor/github.com/go-logr/logr/context_slog.go b/vendor/github.com/go-logr/logr/context_slog.go
new file mode 100644
index 0000000000000000000000000000000000000000..065ef0b8280301682a74104ad48f75ce072f0d67
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/context_slog.go
@@ -0,0 +1,83 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+	"context"
+	"fmt"
+	"log/slog"
+)
+
+// FromContext returns a Logger from ctx or an error if no Logger is found.
+func FromContext(ctx context.Context) (Logger, error) {
+	v := ctx.Value(contextKey{})
+	if v == nil {
+		return Logger{}, notFoundError{}
+	}
+
+	switch v := v.(type) {
+	case Logger:
+		return v, nil
+	case *slog.Logger:
+		return FromSlogHandler(v.Handler()), nil
+	default:
+		// Not reached.
+		panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
+	}
+}
+
+// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found.
+func FromContextAsSlogLogger(ctx context.Context) *slog.Logger {
+	v := ctx.Value(contextKey{})
+	if v == nil {
+		return nil
+	}
+
+	switch v := v.(type) {
+	case Logger:
+		return slog.New(ToSlogHandler(v))
+	case *slog.Logger:
+		return v
+	default:
+		// Not reached.
+		panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
+	}
+}
+
+// FromContextOrDiscard returns a Logger from ctx.  If no Logger is found, this
+// returns a Logger that discards all log messages.
+func FromContextOrDiscard(ctx context.Context) Logger {
+	if logger, err := FromContext(ctx); err == nil {
+		return logger
+	}
+	return Discard()
+}
+
+// NewContext returns a new Context, derived from ctx, which carries the
+// provided Logger.
+func NewContext(ctx context.Context, logger Logger) context.Context {
+	return context.WithValue(ctx, contextKey{}, logger)
+}
+
+// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the
+// provided slog.Logger.
+func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context {
+	return context.WithValue(ctx, contextKey{}, logger)
+}
diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go
new file mode 100644
index 0000000000000000000000000000000000000000..fb2f866f4b7449371ffa1754c0ee264b6828db8d
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/funcr/funcr.go
@@ -0,0 +1,911 @@
+/*
+Copyright 2021 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package funcr implements formatting of structured log messages and
+// optionally captures the call site and timestamp.
+//
+// The simplest way to use it is via its implementation of a
+// github.com/go-logr/logr.LogSink with output through an arbitrary
+// "write" function.  See New and NewJSON for details.
+//
+// # Custom LogSinks
+//
+// For users who need more control, a funcr.Formatter can be embedded inside
+// your own custom LogSink implementation. This is useful when the LogSink
+// needs to implement additional methods, for example.
+//
+// # Formatting
+//
+// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
+// values which are being logged.  When rendering a struct, funcr will use Go's
+// standard JSON tags (all except "string").
+package funcr
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/json"
+	"fmt"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/go-logr/logr"
+)
+
+// New returns a logr.Logger which is implemented by an arbitrary function.
+func New(fn func(prefix, args string), opts Options) logr.Logger {
+	return logr.New(newSink(fn, NewFormatter(opts)))
+}
+
+// NewJSON returns a logr.Logger which is implemented by an arbitrary function
+// and produces JSON output.
+func NewJSON(fn func(obj string), opts Options) logr.Logger {
+	fnWrapper := func(_, obj string) {
+		fn(obj)
+	}
+	return logr.New(newSink(fnWrapper, NewFormatterJSON(opts)))
+}
+
+// Underlier exposes access to the underlying logging function. Since
+// callers only have a logr.Logger, they have to know which
+// implementation is in use, so this interface is less of an
+// abstraction and more of a way to test type conversion.
+type Underlier interface {
+	GetUnderlying() func(prefix, args string)
+}
+
+func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
+	l := &fnlogger{
+		Formatter: formatter,
+		write:     fn,
+	}
+	// For skipping fnlogger.Info and fnlogger.Error.
+	l.Formatter.AddCallDepth(1)
+	return l
+}
+
+// Options carries parameters which influence the way logs are generated.
+type Options struct {
+	// LogCaller tells funcr to add a "caller" key to some or all log lines.
+	// This has some overhead, so some users might not want it.
+	LogCaller MessageClass
+
+	// LogCallerFunc tells funcr to also log the calling function name.  This
+	// has no effect if caller logging is not enabled (see Options.LogCaller).
+	LogCallerFunc bool
+
+	// LogTimestamp tells funcr to add a "ts" key to log lines.  This has some
+	// overhead, so some users might not want it.
+	LogTimestamp bool
+
+	// TimestampFormat tells funcr how to render timestamps when LogTimestamp
+	// is enabled.  If not specified, a default format will be used.  For more
+	// details, see docs for Go's time.Layout.
+	TimestampFormat string
+
+	// LogInfoLevel tells funcr what key to use to log the info level.
+	// If not specified, the info level will be logged as "level".
+	// If this is set to "", the info level will not be logged at all.
+	LogInfoLevel *string
+
+	// Verbosity tells funcr which V logs to produce.  Higher values enable
+	// more logs.  Info logs at or below this level will be written, while logs
+	// above this level will be discarded.
+	Verbosity int
+
+	// RenderBuiltinsHook allows users to mutate the list of key-value pairs
+	// while a log line is being rendered.  The kvList argument follows logr
+	// conventions - each pair of slice elements is comprised of a string key
+	// and an arbitrary value (verified and sanitized before calling this
+	// hook).  The value returned must follow the same conventions.  This hook
+	// can be used to audit or modify logged data.  For example, you might want
+	// to prefix all of funcr's built-in keys with some string.  This hook is
+	// only called for built-in (provided by funcr itself) key-value pairs.
+	// Equivalent hooks are offered for key-value pairs saved via
+	// logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
+	// for user-provided pairs (see RenderArgsHook).
+	RenderBuiltinsHook func(kvList []any) []any
+
+	// RenderValuesHook is the same as RenderBuiltinsHook, except that it is
+	// only called for key-value pairs saved via logr.Logger.WithValues.  See
+	// RenderBuiltinsHook for more details.
+	RenderValuesHook func(kvList []any) []any
+
+	// RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
+	// called for key-value pairs passed directly to Info and Error.  See
+	// RenderBuiltinsHook for more details.
+	RenderArgsHook func(kvList []any) []any
+
+	// MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
+	// that contains a struct, etc.) it may log.  Every time it finds a struct,
+	// slice, array, or map the depth is increased by one.  When the maximum is
+	// reached, the value will be converted to a string indicating that the max
+	// depth has been exceeded.  If this field is not specified, a default
+	// value will be used.
+	MaxLogDepth int
+}
+
+// MessageClass indicates which category or categories of messages to consider.
+type MessageClass int
+
+const (
+	// None ignores all message classes.
+	None MessageClass = iota
+	// All considers all message classes.
+	All
+	// Info only considers info messages.
+	Info
+	// Error only considers error messages.
+	Error
+)
+
+// fnlogger inherits some of its LogSink implementation from Formatter
+// and just needs to add some glue code.
+type fnlogger struct {
+	Formatter
+	write func(prefix, args string)
+}
+
+func (l fnlogger) WithName(name string) logr.LogSink {
+	l.Formatter.AddName(name)
+	return &l
+}
+
+func (l fnlogger) WithValues(kvList ...any) logr.LogSink {
+	l.Formatter.AddValues(kvList)
+	return &l
+}
+
+func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
+	l.Formatter.AddCallDepth(depth)
+	return &l
+}
+
+func (l fnlogger) Info(level int, msg string, kvList ...any) {
+	prefix, args := l.FormatInfo(level, msg, kvList)
+	l.write(prefix, args)
+}
+
+func (l fnlogger) Error(err error, msg string, kvList ...any) {
+	prefix, args := l.FormatError(err, msg, kvList)
+	l.write(prefix, args)
+}
+
+func (l fnlogger) GetUnderlying() func(prefix, args string) {
+	return l.write
+}
+
+// Assert conformance to the interfaces.
+var _ logr.LogSink = &fnlogger{}
+var _ logr.CallDepthLogSink = &fnlogger{}
+var _ Underlier = &fnlogger{}
+
+// NewFormatter constructs a Formatter which emits a JSON-like key=value format.
+func NewFormatter(opts Options) Formatter {
+	return newFormatter(opts, outputKeyValue)
+}
+
+// NewFormatterJSON constructs a Formatter which emits strict JSON.
+func NewFormatterJSON(opts Options) Formatter {
+	return newFormatter(opts, outputJSON)
+}
+
+// Defaults for Options.
+const defaultTimestampFormat = "2006-01-02 15:04:05.000000"
+const defaultMaxLogDepth = 16
+
+func newFormatter(opts Options, outfmt outputFormat) Formatter {
+	if opts.TimestampFormat == "" {
+		opts.TimestampFormat = defaultTimestampFormat
+	}
+	if opts.MaxLogDepth == 0 {
+		opts.MaxLogDepth = defaultMaxLogDepth
+	}
+	if opts.LogInfoLevel == nil {
+		opts.LogInfoLevel = new(string)
+		*opts.LogInfoLevel = "level"
+	}
+	f := Formatter{
+		outputFormat: outfmt,
+		prefix:       "",
+		values:       nil,
+		depth:        0,
+		opts:         &opts,
+	}
+	return f
+}
+
+// Formatter is an opaque struct which can be embedded in a LogSink
+// implementation. It should be constructed with NewFormatter. Some of
+// its methods directly implement logr.LogSink.
+type Formatter struct {
+	outputFormat    outputFormat
+	prefix          string
+	values          []any
+	valuesStr       string
+	parentValuesStr string
+	depth           int
+	opts            *Options
+	group           string // for slog groups
+	groupDepth      int
+}
+
+// outputFormat indicates which outputFormat to use.
+type outputFormat int
+
+const (
+	// outputKeyValue emits a JSON-like key=value format, but not strict JSON.
+	outputKeyValue outputFormat = iota
+	// outputJSON emits strict JSON.
+	outputJSON
+)
+
+// PseudoStruct is a list of key-value pairs that gets logged as a struct.
+type PseudoStruct []any
+
+// render produces a log line, ready to use.
+func (f Formatter) render(builtins, args []any) string {
+	// Empirically bytes.Buffer is faster than strings.Builder for this.
+	buf := bytes.NewBuffer(make([]byte, 0, 1024))
+	if f.outputFormat == outputJSON {
+		buf.WriteByte('{') // for the whole line
+	}
+
+	vals := builtins
+	if hook := f.opts.RenderBuiltinsHook; hook != nil {
+		vals = hook(f.sanitize(vals))
+	}
+	f.flatten(buf, vals, false, false) // keys are ours, no need to escape
+	continuing := len(builtins) > 0
+
+	if f.parentValuesStr != "" {
+		if continuing {
+			buf.WriteByte(f.comma())
+		}
+		buf.WriteString(f.parentValuesStr)
+		continuing = true
+	}
+
+	groupDepth := f.groupDepth
+	if f.group != "" {
+		if f.valuesStr != "" || len(args) != 0 {
+			if continuing {
+				buf.WriteByte(f.comma())
+			}
+			buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
+			buf.WriteByte(f.colon())
+			buf.WriteByte('{') // for the group
+			continuing = false
+		} else {
+			// The group was empty
+			groupDepth--
+		}
+	}
+
+	if f.valuesStr != "" {
+		if continuing {
+			buf.WriteByte(f.comma())
+		}
+		buf.WriteString(f.valuesStr)
+		continuing = true
+	}
+
+	vals = args
+	if hook := f.opts.RenderArgsHook; hook != nil {
+		vals = hook(f.sanitize(vals))
+	}
+	f.flatten(buf, vals, continuing, true) // escape user-provided keys
+
+	for i := 0; i < groupDepth; i++ {
+		buf.WriteByte('}') // for the groups
+	}
+
+	if f.outputFormat == outputJSON {
+		buf.WriteByte('}') // for the whole line
+	}
+
+	return buf.String()
+}
+
+// flatten renders a list of key-value pairs into a buffer.  If continuing is
+// true, it assumes that the buffer has previous values and will emit a
+// separator (which depends on the output format) before the first pair it
+// writes.  If escapeKeys is true, the keys are assumed to have
+// non-JSON-compatible characters in them and must be evaluated for escapes.
+//
+// This function returns a potentially modified version of kvList, which
+// ensures that there is a value for every key (adding a value if needed) and
+// that each key is a string (substituting a key if needed).
+func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any {
+	// This logic overlaps with sanitize() but saves one type-cast per key,
+	// which can be measurable.
+	if len(kvList)%2 != 0 {
+		kvList = append(kvList, noValue)
+	}
+	copied := false
+	for i := 0; i < len(kvList); i += 2 {
+		k, ok := kvList[i].(string)
+		if !ok {
+			if !copied {
+				newList := make([]any, len(kvList))
+				copy(newList, kvList)
+				kvList = newList
+				copied = true
+			}
+			k = f.nonStringKey(kvList[i])
+			kvList[i] = k
+		}
+		v := kvList[i+1]
+
+		if i > 0 || continuing {
+			if f.outputFormat == outputJSON {
+				buf.WriteByte(f.comma())
+			} else {
+				// In theory the format could be something we don't understand.  In
+				// practice, we control it, so it won't be.
+				buf.WriteByte(' ')
+			}
+		}
+
+		buf.WriteString(f.quoted(k, escapeKeys))
+		buf.WriteByte(f.colon())
+		buf.WriteString(f.pretty(v))
+	}
+	return kvList
+}
+
+func (f Formatter) quoted(str string, escape bool) string {
+	if escape {
+		return prettyString(str)
+	}
+	// this is faster
+	return `"` + str + `"`
+}
+
+func (f Formatter) comma() byte {
+	if f.outputFormat == outputJSON {
+		return ','
+	}
+	return ' '
+}
+
+func (f Formatter) colon() byte {
+	if f.outputFormat == outputJSON {
+		return ':'
+	}
+	return '='
+}
+
+func (f Formatter) pretty(value any) string {
+	return f.prettyWithFlags(value, 0, 0)
+}
+
+const (
+	flagRawStruct = 0x1 // do not print braces on structs
+)
+
+// TODO: This is not fast. Most of the overhead goes here.
+func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
+	if depth > f.opts.MaxLogDepth {
+		return `"<max-log-depth-exceeded>"`
+	}
+
+	// Handle types that take full control of logging.
+	if v, ok := value.(logr.Marshaler); ok {
+		// Replace the value with what the type wants to get logged.
+		// That then gets handled below via reflection.
+		value = invokeMarshaler(v)
+	}
+
+	// Handle types that want to format themselves.
+	switch v := value.(type) {
+	case fmt.Stringer:
+		value = invokeStringer(v)
+	case error:
+		value = invokeError(v)
+	}
+
+	// Handling the most common types without reflect is a small perf win.
+	switch v := value.(type) {
+	case bool:
+		return strconv.FormatBool(v)
+	case string:
+		return prettyString(v)
+	case int:
+		return strconv.FormatInt(int64(v), 10)
+	case int8:
+		return strconv.FormatInt(int64(v), 10)
+	case int16:
+		return strconv.FormatInt(int64(v), 10)
+	case int32:
+		return strconv.FormatInt(int64(v), 10)
+	case int64:
+		return strconv.FormatInt(int64(v), 10)
+	case uint:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint8:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint16:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint32:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint64:
+		return strconv.FormatUint(v, 10)
+	case uintptr:
+		return strconv.FormatUint(uint64(v), 10)
+	case float32:
+		return strconv.FormatFloat(float64(v), 'f', -1, 32)
+	case float64:
+		return strconv.FormatFloat(v, 'f', -1, 64)
+	case complex64:
+		return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
+	case complex128:
+		return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
+	case PseudoStruct:
+		buf := bytes.NewBuffer(make([]byte, 0, 1024))
+		v = f.sanitize(v)
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('{')
+		}
+		for i := 0; i < len(v); i += 2 {
+			if i > 0 {
+				buf.WriteByte(f.comma())
+			}
+			k, _ := v[i].(string) // sanitize() above means no need to check success
+			// arbitrary keys might need escaping
+			buf.WriteString(prettyString(k))
+			buf.WriteByte(f.colon())
+			buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
+		}
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('}')
+		}
+		return buf.String()
+	}
+
+	buf := bytes.NewBuffer(make([]byte, 0, 256))
+	t := reflect.TypeOf(value)
+	if t == nil {
+		return "null"
+	}
+	v := reflect.ValueOf(value)
+	switch t.Kind() {
+	case reflect.Bool:
+		return strconv.FormatBool(v.Bool())
+	case reflect.String:
+		return prettyString(v.String())
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return strconv.FormatInt(int64(v.Int()), 10)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return strconv.FormatUint(uint64(v.Uint()), 10)
+	case reflect.Float32:
+		return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
+	case reflect.Float64:
+		return strconv.FormatFloat(v.Float(), 'f', -1, 64)
+	case reflect.Complex64:
+		return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
+	case reflect.Complex128:
+		return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
+	case reflect.Struct:
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('{')
+		}
+		printComma := false // testing i>0 is not enough because of JSON omitted fields
+		for i := 0; i < t.NumField(); i++ {
+			fld := t.Field(i)
+			if fld.PkgPath != "" {
+				// reflect says this field is only defined for non-exported fields.
+				continue
+			}
+			if !v.Field(i).CanInterface() {
+				// reflect isn't clear exactly what this means, but we can't use it.
+				continue
+			}
+			name := ""
+			omitempty := false
+			if tag, found := fld.Tag.Lookup("json"); found {
+				if tag == "-" {
+					continue
+				}
+				if comma := strings.Index(tag, ","); comma != -1 {
+					if n := tag[:comma]; n != "" {
+						name = n
+					}
+					rest := tag[comma:]
+					if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
+						omitempty = true
+					}
+				} else {
+					name = tag
+				}
+			}
+			if omitempty && isEmpty(v.Field(i)) {
+				continue
+			}
+			if printComma {
+				buf.WriteByte(f.comma())
+			}
+			printComma = true // if we got here, we are rendering a field
+			if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
+				buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
+				continue
+			}
+			if name == "" {
+				name = fld.Name
+			}
+			// field names can't contain characters which need escaping
+			buf.WriteString(f.quoted(name, false))
+			buf.WriteByte(f.colon())
+			buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
+		}
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('}')
+		}
+		return buf.String()
+	case reflect.Slice, reflect.Array:
+		// If this is outputting as JSON make sure this isn't really a json.RawMessage.
+		// If so just emit "as-is" and don't pretty it as that will just print
+		// it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want.
+		if f.outputFormat == outputJSON {
+			if rm, ok := value.(json.RawMessage); ok {
+				// If it's empty make sure we emit an empty value as the array style would below.
+				if len(rm) > 0 {
+					buf.Write(rm)
+				} else {
+					buf.WriteString("null")
+				}
+				return buf.String()
+			}
+		}
+		buf.WriteByte('[')
+		for i := 0; i < v.Len(); i++ {
+			if i > 0 {
+				buf.WriteByte(f.comma())
+			}
+			e := v.Index(i)
+			buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
+		}
+		buf.WriteByte(']')
+		return buf.String()
+	case reflect.Map:
+		buf.WriteByte('{')
+		// This does not sort the map keys, for best perf.
+		it := v.MapRange()
+		i := 0
+		for it.Next() {
+			if i > 0 {
+				buf.WriteByte(f.comma())
+			}
+			// If a map key supports TextMarshaler, use it.
+			keystr := ""
+			if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
+				txt, err := m.MarshalText()
+				if err != nil {
+					keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
+				} else {
+					keystr = string(txt)
+				}
+				keystr = prettyString(keystr)
+			} else {
+				// prettyWithFlags will produce already-escaped values
+				keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
+				if t.Key().Kind() != reflect.String {
+					// JSON only does string keys.  Unlike Go's standard JSON, we'll
+					// convert just about anything to a string.
+					keystr = prettyString(keystr)
+				}
+			}
+			buf.WriteString(keystr)
+			buf.WriteByte(f.colon())
+			buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
+			i++
+		}
+		buf.WriteByte('}')
+		return buf.String()
+	case reflect.Ptr, reflect.Interface:
+		if v.IsNil() {
+			return "null"
+		}
+		return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
+	}
+	return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
+}
+
+func prettyString(s string) string {
+	// Avoid escaping (which does allocations) if we can.
+	if needsEscape(s) {
+		return strconv.Quote(s)
+	}
+	b := bytes.NewBuffer(make([]byte, 0, 1024))
+	b.WriteByte('"')
+	b.WriteString(s)
+	b.WriteByte('"')
+	return b.String()
+}
+
+// needsEscape determines whether the input string needs to be escaped or not,
+// without doing any allocations.
+func needsEscape(s string) bool {
+	for _, r := range s {
+		if !strconv.IsPrint(r) || r == '\\' || r == '"' {
+			return true
+		}
+	}
+	return false
+}
+
+func isEmpty(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Complex64, reflect.Complex128:
+		return v.Complex() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
+
+func invokeMarshaler(m logr.Marshaler) (ret any) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return m.MarshalLog()
+}
+
+func invokeStringer(s fmt.Stringer) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return s.String()
+}
+
+func invokeError(e error) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return e.Error()
+}
+
+// Caller represents the original call site for a log line, after considering
+// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper.  The File and
+// Line fields will always be provided, while the Func field is optional.
+// Users can set the render hook fields in Options to examine logged key-value
+// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
+// field is enabled for the given MessageClass.
+type Caller struct {
+	// File is the basename of the file for this call site.
+	File string `json:"file"`
+	// Line is the line number in the file for this call site.
+	Line int `json:"line"`
+	// Func is the function name for this call site, or empty if
+	// Options.LogCallerFunc is not enabled.
+	Func string `json:"function,omitempty"`
+}
+
+func (f Formatter) caller() Caller {
+	// +1 for this frame, +1 for Info/Error.
+	pc, file, line, ok := runtime.Caller(f.depth + 2)
+	if !ok {
+		return Caller{"<unknown>", 0, ""}
+	}
+	fn := ""
+	if f.opts.LogCallerFunc {
+		if fp := runtime.FuncForPC(pc); fp != nil {
+			fn = fp.Name()
+		}
+	}
+
+	return Caller{filepath.Base(file), line, fn}
+}
+
+const noValue = "<no-value>"
+
+func (f Formatter) nonStringKey(v any) string {
+	return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
+}
+
+// snippet produces a short snippet string of an arbitrary value.
+func (f Formatter) snippet(v any) string {
+	const snipLen = 16
+
+	snip := f.pretty(v)
+	if len(snip) > snipLen {
+		snip = snip[:snipLen]
+	}
+	return snip
+}
+
+// sanitize ensures that a list of key-value pairs has a value for every key
+// (adding a value if needed) and that each key is a string (substituting a key
+// if needed).
+func (f Formatter) sanitize(kvList []any) []any {
+	if len(kvList)%2 != 0 {
+		kvList = append(kvList, noValue)
+	}
+	for i := 0; i < len(kvList); i += 2 {
+		_, ok := kvList[i].(string)
+		if !ok {
+			kvList[i] = f.nonStringKey(kvList[i])
+		}
+	}
+	return kvList
+}
+
+// startGroup opens a new group scope (basically a sub-struct), which locks all
+// the current saved values and starts them anew.  This is needed to satisfy
+// slog.
+func (f *Formatter) startGroup(group string) {
+	// Unnamed groups are just inlined.
+	if group == "" {
+		return
+	}
+
+	// Any saved values can no longer be changed.
+	buf := bytes.NewBuffer(make([]byte, 0, 1024))
+	continuing := false
+
+	if f.parentValuesStr != "" {
+		buf.WriteString(f.parentValuesStr)
+		continuing = true
+	}
+
+	if f.group != "" && f.valuesStr != "" {
+		if continuing {
+			buf.WriteByte(f.comma())
+		}
+		buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
+		buf.WriteByte(f.colon())
+		buf.WriteByte('{') // for the group
+		continuing = false
+	}
+
+	if f.valuesStr != "" {
+		if continuing {
+			buf.WriteByte(f.comma())
+		}
+		buf.WriteString(f.valuesStr)
+	}
+
+	// NOTE: We don't close the scope here - that's done later, when a log line
+	// is actually rendered (because we have N scopes to close).
+
+	f.parentValuesStr = buf.String()
+
+	// Start collecting new values.
+	f.group = group
+	f.groupDepth++
+	f.valuesStr = ""
+	f.values = nil
+}
+
+// Init configures this Formatter from runtime info, such as the call depth
+// imposed by logr itself.
+// Note that this receiver is a pointer, so depth can be saved.
+func (f *Formatter) Init(info logr.RuntimeInfo) {
+	f.depth += info.CallDepth
+}
+
+// Enabled checks whether an info message at the given level should be logged.
+func (f Formatter) Enabled(level int) bool {
+	return level <= f.opts.Verbosity
+}
+
+// GetDepth returns the current depth of this Formatter.  This is useful for
+// implementations which do their own caller attribution.
+func (f Formatter) GetDepth() int {
+	return f.depth
+}
+
+// FormatInfo renders an Info log message into strings.  The prefix will be
+// empty when no names were set (via AddNames), or when the output is
+// configured for JSON.
+func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) {
+	args := make([]any, 0, 64) // using a constant here impacts perf
+	prefix = f.prefix
+	if f.outputFormat == outputJSON {
+		args = append(args, "logger", prefix)
+		prefix = ""
+	}
+	if f.opts.LogTimestamp {
+		args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
+	}
+	if policy := f.opts.LogCaller; policy == All || policy == Info {
+		args = append(args, "caller", f.caller())
+	}
+	if key := *f.opts.LogInfoLevel; key != "" {
+		args = append(args, key, level)
+	}
+	args = append(args, "msg", msg)
+	return prefix, f.render(args, kvList)
+}
+
+// FormatError renders an Error log message into strings.  The prefix will be
+// empty when no names were set (via AddNames), or when the output is
+// configured for JSON.
+func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) {
+	args := make([]any, 0, 64) // using a constant here impacts perf
+	prefix = f.prefix
+	if f.outputFormat == outputJSON {
+		args = append(args, "logger", prefix)
+		prefix = ""
+	}
+	if f.opts.LogTimestamp {
+		args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
+	}
+	if policy := f.opts.LogCaller; policy == All || policy == Error {
+		args = append(args, "caller", f.caller())
+	}
+	args = append(args, "msg", msg)
+	var loggableErr any
+	if err != nil {
+		loggableErr = err.Error()
+	}
+	args = append(args, "error", loggableErr)
+	return prefix, f.render(args, kvList)
+}
+
+// AddName appends the specified name.  funcr uses '/' characters to separate
+// name elements.  Callers should not pass '/' in the provided name string, but
+// this library does not actually enforce that.
+func (f *Formatter) AddName(name string) {
+	if len(f.prefix) > 0 {
+		f.prefix += "/"
+	}
+	f.prefix += name
+}
+
+// AddValues adds key-value pairs to the set of saved values to be logged with
+// each log line.
+func (f *Formatter) AddValues(kvList []any) {
+	// Three slice args forces a copy.
+	n := len(f.values)
+	f.values = append(f.values[:n:n], kvList...)
+
+	vals := f.values
+	if hook := f.opts.RenderValuesHook; hook != nil {
+		vals = hook(f.sanitize(vals))
+	}
+
+	// Pre-render values, so we don't have to do it on each Info/Error call.
+	buf := bytes.NewBuffer(make([]byte, 0, 1024))
+	f.flatten(buf, vals, false, true) // escape user-provided keys
+	f.valuesStr = buf.String()
+}
+
+// AddCallDepth increases the number of stack-frames to skip when attributing
+// the log line to a file and line.
+func (f *Formatter) AddCallDepth(depth int) {
+	f.depth += depth
+}
diff --git a/vendor/github.com/go-logr/logr/funcr/slogsink.go b/vendor/github.com/go-logr/logr/funcr/slogsink.go
new file mode 100644
index 0000000000000000000000000000000000000000..7bd84761e2d0246095254eb5be3b45e3f084a3f2
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/funcr/slogsink.go
@@ -0,0 +1,105 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package funcr
+
+import (
+	"context"
+	"log/slog"
+
+	"github.com/go-logr/logr"
+)
+
+var _ logr.SlogSink = &fnlogger{}
+
+const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink
+
+func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
+	kvList := make([]any, 0, 2*record.NumAttrs())
+	record.Attrs(func(attr slog.Attr) bool {
+		kvList = attrToKVs(attr, kvList)
+		return true
+	})
+
+	if record.Level >= slog.LevelError {
+		l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
+	} else {
+		level := l.levelFromSlog(record.Level)
+		l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
+	}
+	return nil
+}
+
+func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
+	kvList := make([]any, 0, 2*len(attrs))
+	for _, attr := range attrs {
+		kvList = attrToKVs(attr, kvList)
+	}
+	l.AddValues(kvList)
+	return &l
+}
+
+func (l fnlogger) WithGroup(name string) logr.SlogSink {
+	l.startGroup(name)
+	return &l
+}
+
+// attrToKVs appends a slog.Attr to a logr-style kvList.  It handles slog Groups
+// and other details of slog.
+func attrToKVs(attr slog.Attr, kvList []any) []any {
+	attrVal := attr.Value.Resolve()
+	if attrVal.Kind() == slog.KindGroup {
+		groupVal := attrVal.Group()
+		grpKVs := make([]any, 0, 2*len(groupVal))
+		for _, attr := range groupVal {
+			grpKVs = attrToKVs(attr, grpKVs)
+		}
+		if attr.Key == "" {
+			// slog says we have to inline these
+			kvList = append(kvList, grpKVs...)
+		} else {
+			kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
+		}
+	} else if attr.Key != "" {
+		kvList = append(kvList, attr.Key, attrVal.Any())
+	}
+
+	return kvList
+}
+
+// levelFromSlog adjusts the level by the logger's verbosity and negates it.
+// It ensures that the result is >= 0. This is necessary because the result is
+// passed to a LogSink and that API did not historically document whether
+// levels could be negative or what that meant.
+//
+// Some example usage:
+//
+//	logrV0 := getMyLogger()
+//	logrV2 := logrV0.V(2)
+//	slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+//	slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
+//	slogV2.Info("msg")  // =~  logrV2.V(0) =~ logrV0.V(2)
+//	slogV2.Warn("msg")  // =~ logrV2.V(-4) =~ logrV0.V(0)
+func (l fnlogger) levelFromSlog(level slog.Level) int {
+	result := -level
+	if result < 0 {
+		result = 0 // because LogSink doesn't expect negative V levels
+	}
+	return int(result)
+}
diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go
index 2a5075a180f4f792d43faf2fca90e117890b8b74..b4428e105b484991fbe5bf43ae29aa61f635e9c2 100644
--- a/vendor/github.com/go-logr/logr/logr.go
+++ b/vendor/github.com/go-logr/logr/logr.go
@@ -207,10 +207,6 @@ limitations under the License.
 // those.
 package logr
 
-import (
-	"context"
-)
-
 // New returns a new Logger instance.  This is primarily used by libraries
 // implementing LogSink, rather than end users.  Passing a nil sink will create
 // a Logger which discards all log lines.
@@ -410,45 +406,6 @@ func (l Logger) IsZero() bool {
 	return l.sink == nil
 }
 
-// contextKey is how we find Loggers in a context.Context.
-type contextKey struct{}
-
-// FromContext returns a Logger from ctx or an error if no Logger is found.
-func FromContext(ctx context.Context) (Logger, error) {
-	if v, ok := ctx.Value(contextKey{}).(Logger); ok {
-		return v, nil
-	}
-
-	return Logger{}, notFoundError{}
-}
-
-// notFoundError exists to carry an IsNotFound method.
-type notFoundError struct{}
-
-func (notFoundError) Error() string {
-	return "no logr.Logger was present"
-}
-
-func (notFoundError) IsNotFound() bool {
-	return true
-}
-
-// FromContextOrDiscard returns a Logger from ctx.  If no Logger is found, this
-// returns a Logger that discards all log messages.
-func FromContextOrDiscard(ctx context.Context) Logger {
-	if v, ok := ctx.Value(contextKey{}).(Logger); ok {
-		return v
-	}
-
-	return Discard()
-}
-
-// NewContext returns a new Context, derived from ctx, which carries the
-// provided Logger.
-func NewContext(ctx context.Context, logger Logger) context.Context {
-	return context.WithValue(ctx, contextKey{}, logger)
-}
-
 // RuntimeInfo holds information that the logr "core" library knows which
 // LogSinks might want to know.
 type RuntimeInfo struct {
diff --git a/vendor/github.com/go-logr/logr/slogr/sloghandler.go b/vendor/github.com/go-logr/logr/sloghandler.go
similarity index 63%
rename from vendor/github.com/go-logr/logr/slogr/sloghandler.go
rename to vendor/github.com/go-logr/logr/sloghandler.go
index ec6725ce2cdbe6a9b52aff5637f7084f1d7cda26..82d1ba494819b32c1fc4a3771ae972e473de124b 100644
--- a/vendor/github.com/go-logr/logr/slogr/sloghandler.go
+++ b/vendor/github.com/go-logr/logr/sloghandler.go
@@ -17,18 +17,16 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package slogr
+package logr
 
 import (
 	"context"
 	"log/slog"
-
-	"github.com/go-logr/logr"
 )
 
 type slogHandler struct {
 	// May be nil, in which case all logs get discarded.
-	sink logr.LogSink
+	sink LogSink
 	// Non-nil if sink is non-nil and implements SlogSink.
 	slogSink SlogSink
 
@@ -54,7 +52,7 @@ func (l *slogHandler) GetLevel() slog.Level {
 	return l.levelBias
 }
 
-func (l *slogHandler) Enabled(ctx context.Context, level slog.Level) bool {
+func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool {
 	return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level)))
 }
 
@@ -72,9 +70,7 @@ func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
 
 	kvList := make([]any, 0, 2*record.NumAttrs())
 	record.Attrs(func(attr slog.Attr) bool {
-		if attr.Key != "" {
-			kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any())
-		}
+		kvList = attrToKVs(attr, l.groupPrefix, kvList)
 		return true
 	})
 	if record.Level >= slog.LevelError {
@@ -90,15 +86,15 @@ func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
 // are called by Handle, code in slog gets skipped.
 //
 // This offset currently (Go 1.21.0) works for calls through
-// slog.New(NewSlogHandler(...)).  There's no guarantee that the call
+// slog.New(ToSlogHandler(...)).  There's no guarantee that the call
 // chain won't change. Wrapping the handler will also break unwinding. It's
 // still better than not adjusting at all....
 //
-// This cannot be done when constructing the handler because NewLogr needs
+// This cannot be done when constructing the handler because FromSlogHandler needs
 // access to the original sink without this adjustment. A second copy would
 // work, but then WithAttrs would have to be called for both of them.
-func (l *slogHandler) sinkWithCallDepth() logr.LogSink {
-	if sink, ok := l.sink.(logr.CallDepthLogSink); ok {
+func (l *slogHandler) sinkWithCallDepth() LogSink {
+	if sink, ok := l.sink.(CallDepthLogSink); ok {
 		return sink.WithCallDepth(2)
 	}
 	return l.sink
@@ -109,60 +105,88 @@ func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
 		return l
 	}
 
-	copy := *l
+	clone := *l
 	if l.slogSink != nil {
-		copy.slogSink = l.slogSink.WithAttrs(attrs)
-		copy.sink = copy.slogSink
+		clone.slogSink = l.slogSink.WithAttrs(attrs)
+		clone.sink = clone.slogSink
 	} else {
 		kvList := make([]any, 0, 2*len(attrs))
 		for _, attr := range attrs {
-			if attr.Key != "" {
-				kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any())
-			}
+			kvList = attrToKVs(attr, l.groupPrefix, kvList)
 		}
-		copy.sink = l.sink.WithValues(kvList...)
+		clone.sink = l.sink.WithValues(kvList...)
 	}
-	return &copy
+	return &clone
 }
 
 func (l *slogHandler) WithGroup(name string) slog.Handler {
 	if l.sink == nil {
 		return l
 	}
-	copy := *l
+	if name == "" {
+		// slog says to inline empty groups
+		return l
+	}
+	clone := *l
 	if l.slogSink != nil {
-		copy.slogSink = l.slogSink.WithGroup(name)
-		copy.sink = l.slogSink
+		clone.slogSink = l.slogSink.WithGroup(name)
+		clone.sink = clone.slogSink
 	} else {
-		copy.groupPrefix = copy.addGroupPrefix(name)
+		clone.groupPrefix = addPrefix(clone.groupPrefix, name)
+	}
+	return &clone
+}
+
+// attrToKVs appends a slog.Attr to a logr-style kvList.  It handles slog Groups
+// and other details of slog.
+func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any {
+	attrVal := attr.Value.Resolve()
+	if attrVal.Kind() == slog.KindGroup {
+		groupVal := attrVal.Group()
+		grpKVs := make([]any, 0, 2*len(groupVal))
+		prefix := groupPrefix
+		if attr.Key != "" {
+			prefix = addPrefix(groupPrefix, attr.Key)
+		}
+		for _, attr := range groupVal {
+			grpKVs = attrToKVs(attr, prefix, grpKVs)
+		}
+		kvList = append(kvList, grpKVs...)
+	} else if attr.Key != "" {
+		kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any())
 	}
-	return &copy
+
+	return kvList
 }
 
-func (l *slogHandler) addGroupPrefix(name string) string {
-	if l.groupPrefix == "" {
+func addPrefix(prefix, name string) string {
+	if prefix == "" {
 		return name
 	}
-	return l.groupPrefix + groupSeparator + name
+	if name == "" {
+		return prefix
+	}
+	return prefix + groupSeparator + name
 }
 
 // levelFromSlog adjusts the level by the logger's verbosity and negates it.
 // It ensures that the result is >= 0. This is necessary because the result is
-// passed to a logr.LogSink and that API did not historically document whether
+// passed to a LogSink and that API did not historically document whether
 // levels could be negative or what that meant.
 //
 // Some example usage:
-//     logrV0 := getMyLogger()
-//     logrV2 := logrV0.V(2)
-//     slogV2 := slog.New(slogr.NewSlogHandler(logrV2))
-//     slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
-//     slogV2.Info("msg")  // =~  logrV2.V(0) =~ logrV0.V(2)
-//     slogv2.Warn("msg")  // =~ logrV2.V(-4) =~ logrV0.V(0)
+//
+//	logrV0 := getMyLogger()
+//	logrV2 := logrV0.V(2)
+//	slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+//	slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
+//	slogV2.Info("msg")  // =~  logrV2.V(0) =~ logrV0.V(2)
+//	slogV2.Warn("msg")  // =~ logrV2.V(-4) =~ logrV0.V(0)
 func (l *slogHandler) levelFromSlog(level slog.Level) int {
 	result := -level
-	result += l.levelBias // in case the original logr.Logger had a V level
+	result += l.levelBias // in case the original Logger had a V level
 	if result < 0 {
-		result = 0 // because logr.LogSink doesn't expect negative V levels
+		result = 0 // because LogSink doesn't expect negative V levels
 	}
 	return int(result)
 }
diff --git a/vendor/github.com/go-logr/logr/slogr.go b/vendor/github.com/go-logr/logr/slogr.go
new file mode 100644
index 0000000000000000000000000000000000000000..28a83d02439e470201b66a037d1eefe6d828640c
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/slogr.go
@@ -0,0 +1,100 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+	"context"
+	"log/slog"
+)
+
+// FromSlogHandler returns a Logger which writes to the slog.Handler.
+//
+// The logr verbosity level is mapped to slog levels such that V(0) becomes
+// slog.LevelInfo and V(4) becomes slog.LevelDebug.
+func FromSlogHandler(handler slog.Handler) Logger {
+	if handler, ok := handler.(*slogHandler); ok {
+		if handler.sink == nil {
+			return Discard()
+		}
+		return New(handler.sink).V(int(handler.levelBias))
+	}
+	return New(&slogSink{handler: handler})
+}
+
+// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger.
+//
+// The returned logger writes all records with level >= slog.LevelError as
+// error log entries with LogSink.Error, regardless of the verbosity level of
+// the Logger:
+//
+//	logger := <some Logger with 0 as verbosity level>
+//	slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
+//
+// The level of all other records gets reduced by the verbosity
+// level of the Logger and the result is negated. If it happens
+// to be negative, then it gets replaced by zero because a LogSink
+// is not expected to handle negative levels:
+//
+//	slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
+//	slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...)
+//	slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
+//	slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
+func ToSlogHandler(logger Logger) slog.Handler {
+	if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
+		return sink.handler
+	}
+
+	handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
+	if slogSink, ok := handler.sink.(SlogSink); ok {
+		handler.slogSink = slogSink
+	}
+	return handler
+}
+
+// SlogSink is an optional interface that a LogSink can implement to support
+// logging through the slog.Logger or slog.Handler APIs better. It then should
+// also support special slog values like slog.Group. When used as a
+// slog.Handler, the advantages are:
+//
+//   - stack unwinding gets avoided in favor of logging the pre-recorded PC,
+//     as intended by slog
+//   - proper grouping of key/value pairs via WithGroup
+//   - verbosity levels > slog.LevelInfo can be recorded
+//   - less overhead
+//
+// Both APIs (Logger and slog.Logger/Handler) then are supported equally
+// well. Developers can pick whatever API suits them better and/or mix
+// packages which use either API in the same binary with a common logging
+// implementation.
+//
+// This interface is necessary because the type implementing the LogSink
+// interface cannot also implement the slog.Handler interface due to the
+// different prototype of the common Enabled method.
+//
+// An implementation could support both interfaces in two different types, but then
+// additional interfaces would be needed to convert between those types in FromSlogHandler
+// and ToSlogHandler.
+type SlogSink interface {
+	LogSink
+
+	Handle(ctx context.Context, record slog.Record) error
+	WithAttrs(attrs []slog.Attr) SlogSink
+	WithGroup(name string) SlogSink
+}
diff --git a/vendor/github.com/go-logr/logr/slogr/slogr.go b/vendor/github.com/go-logr/logr/slogr/slogr.go
index eb519ae23f8566788782b3a9aaab6e2ea7ce34bb..36432c56fdf64cfb9d32cddda1d4491731e73484 100644
--- a/vendor/github.com/go-logr/logr/slogr/slogr.go
+++ b/vendor/github.com/go-logr/logr/slogr/slogr.go
@@ -23,10 +23,11 @@ limitations under the License.
 //
 // See the README in the top-level [./logr] package for a discussion of
 // interoperability.
+//
+// Deprecated: use the main logr package instead.
 package slogr
 
 import (
-	"context"
 	"log/slog"
 
 	"github.com/go-logr/logr"
@@ -34,75 +35,27 @@ import (
 
 // NewLogr returns a logr.Logger which writes to the slog.Handler.
 //
-// The logr verbosity level is mapped to slog levels such that V(0) becomes
-// slog.LevelInfo and V(4) becomes slog.LevelDebug.
+// Deprecated: use [logr.FromSlogHandler] instead.
 func NewLogr(handler slog.Handler) logr.Logger {
-	if handler, ok := handler.(*slogHandler); ok {
-		if handler.sink == nil {
-			return logr.Discard()
-		}
-		return logr.New(handler.sink).V(int(handler.levelBias))
-	}
-	return logr.New(&slogSink{handler: handler})
+	return logr.FromSlogHandler(handler)
 }
 
 // NewSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger.
 //
-// The returned logger writes all records with level >= slog.LevelError as
-// error log entries with LogSink.Error, regardless of the verbosity level of
-// the logr.Logger:
-//
-//	logger := <some logr.Logger with 0 as verbosity level>
-//	slog.New(NewSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
-//
-// The level of all other records gets reduced by the verbosity
-// level of the logr.Logger and the result is negated. If it happens
-// to be negative, then it gets replaced by zero because a LogSink
-// is not expected to handled negative levels:
-//
-//	slog.New(NewSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
-//	slog.New(NewSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...)
-//	slog.New(NewSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
-//	slog.New(NewSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
+// Deprecated: use [logr.ToSlogHandler] instead.
 func NewSlogHandler(logger logr.Logger) slog.Handler {
-	if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
-		return sink.handler
-	}
+	return logr.ToSlogHandler(logger)
+}
 
-	handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
-	if slogSink, ok := handler.sink.(SlogSink); ok {
-		handler.slogSink = slogSink
-	}
-	return handler
+// ToSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger.
+//
+// Deprecated: use [logr.ToSlogHandler] instead.
+func ToSlogHandler(logger logr.Logger) slog.Handler {
+	return logr.ToSlogHandler(logger)
 }
 
 // SlogSink is an optional interface that a LogSink can implement to support
-// logging through the slog.Logger or slog.Handler APIs better. It then should
-// also support special slog values like slog.Group. When used as a
-// slog.Handler, the advantages are:
+// logging through the slog.Logger or slog.Handler APIs better.
 //
-//   - stack unwinding gets avoided in favor of logging the pre-recorded PC,
-//     as intended by slog
-//   - proper grouping of key/value pairs via WithGroup
-//   - verbosity levels > slog.LevelInfo can be recorded
-//   - less overhead
-//
-// Both APIs (logr.Logger and slog.Logger/Handler) then are supported equally
-// well. Developers can pick whatever API suits them better and/or mix
-// packages which use either API in the same binary with a common logging
-// implementation.
-//
-// This interface is necessary because the type implementing the LogSink
-// interface cannot also implement the slog.Handler interface due to the
-// different prototype of the common Enabled method.
-//
-// An implementation could support both interfaces in two different types, but then
-// additional interfaces would be needed to convert between those types in NewLogr
-// and NewSlogHandler.
-type SlogSink interface {
-	logr.LogSink
-
-	Handle(ctx context.Context, record slog.Record) error
-	WithAttrs(attrs []slog.Attr) SlogSink
-	WithGroup(name string) SlogSink
-}
+// Deprecated: use [logr.SlogSink] instead.
+type SlogSink = logr.SlogSink
diff --git a/vendor/github.com/go-logr/logr/slogr/slogsink.go b/vendor/github.com/go-logr/logr/slogsink.go
similarity index 82%
rename from vendor/github.com/go-logr/logr/slogr/slogsink.go
rename to vendor/github.com/go-logr/logr/slogsink.go
index 6fbac561d98b1b36162986bbef5c4e9133871eb4..4060fcbc2b0b3415e18f7c9a252b5b3ff3fdda11 100644
--- a/vendor/github.com/go-logr/logr/slogr/slogsink.go
+++ b/vendor/github.com/go-logr/logr/slogsink.go
@@ -17,24 +17,22 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package slogr
+package logr
 
 import (
 	"context"
 	"log/slog"
 	"runtime"
 	"time"
-
-	"github.com/go-logr/logr"
 )
 
 var (
-	_ logr.LogSink          = &slogSink{}
-	_ logr.CallDepthLogSink = &slogSink{}
-	_ Underlier             = &slogSink{}
+	_ LogSink          = &slogSink{}
+	_ CallDepthLogSink = &slogSink{}
+	_ Underlier        = &slogSink{}
 )
 
-// Underlier is implemented by the LogSink returned by NewLogr.
+// Underlier is implemented by the LogSink returned by FromSlogHandler.
 type Underlier interface {
 	// GetUnderlying returns the Handler used by the LogSink.
 	GetUnderlying() slog.Handler
@@ -54,7 +52,7 @@ type slogSink struct {
 	handler   slog.Handler
 }
 
-func (l *slogSink) Init(info logr.RuntimeInfo) {
+func (l *slogSink) Init(info RuntimeInfo) {
 	l.callDepth = info.CallDepth
 }
 
@@ -62,7 +60,7 @@ func (l *slogSink) GetUnderlying() slog.Handler {
 	return l.handler
 }
 
-func (l *slogSink) WithCallDepth(depth int) logr.LogSink {
+func (l *slogSink) WithCallDepth(depth int) LogSink {
 	newLogger := *l
 	newLogger.callDepth += depth
 	return &newLogger
@@ -93,18 +91,18 @@ func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interf
 		record.AddAttrs(slog.Any(errKey, err))
 	}
 	record.Add(kvList...)
-	l.handler.Handle(context.Background(), record)
+	_ = l.handler.Handle(context.Background(), record)
 }
 
-func (l slogSink) WithName(name string) logr.LogSink {
+func (l slogSink) WithName(name string) LogSink {
 	if l.name != "" {
-		l.name = l.name + "/"
+		l.name += "/"
 	}
 	l.name += name
 	return &l
 }
 
-func (l slogSink) WithValues(kvList ...interface{}) logr.LogSink {
+func (l slogSink) WithValues(kvList ...interface{}) LogSink {
 	l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...))
 	return &l
 }
diff --git a/vendor/github.com/go-logr/stdr/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5158667890c05b400c5a2db9896d596380024a06
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/README.md
@@ -0,0 +1,6 @@
+# Minimal Go logging using logr and Go's standard library
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr)
+
+This package implements the [logr interface](https://github.com/go-logr/logr)
+in terms of Go's standard [log package](https://pkg.go.dev/log).
diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go
new file mode 100644
index 0000000000000000000000000000000000000000..93a8aab51be840c701361ee6e984b057ae3dc53e
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/stdr.go
@@ -0,0 +1,170 @@
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package stdr implements github.com/go-logr/logr.Logger in terms of
+// Go's standard log package.
+package stdr
+
+import (
+	"log"
+	"os"
+
+	"github.com/go-logr/logr"
+	"github.com/go-logr/logr/funcr"
+)
+
+// The global verbosity level.  See SetVerbosity().
+var globalVerbosity int
+
+// SetVerbosity sets the global level against which all info logs will be
+// compared.  If this is greater than or equal to the "V" of the logger, the
+// message will be logged.  A higher value here means more logs will be written.
+// The previous verbosity value is returned.  This is not concurrent-safe -
+// callers must be sure to call it from only one goroutine.
+func SetVerbosity(v int) int {
+	old := globalVerbosity
+	globalVerbosity = v
+	return old
+}
+
+// New returns a logr.Logger which is implemented by Go's standard log package,
+// or something like it.  If std is nil, this will use a default logger
+// instead.
+//
+// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
+func New(std StdLogger) logr.Logger {
+	return NewWithOptions(std, Options{})
+}
+
+// NewWithOptions returns a logr.Logger which is implemented by Go's standard
+// log package, or something like it.  See New for details.
+func NewWithOptions(std StdLogger, opts Options) logr.Logger {
+	if std == nil {
+		// Go's log.Default() is only available in 1.16 and higher.
+		std = log.New(os.Stderr, "", log.LstdFlags)
+	}
+
+	if opts.Depth < 0 {
+		opts.Depth = 0
+	}
+
+	fopts := funcr.Options{
+		LogCaller: funcr.MessageClass(opts.LogCaller),
+	}
+
+	sl := &logger{
+		Formatter: funcr.NewFormatter(fopts),
+		std:       std,
+	}
+
+	// For skipping our own logger.Info/Error.
+	sl.Formatter.AddCallDepth(1 + opts.Depth)
+
+	return logr.New(sl)
+}
+
+// Options carries parameters which influence the way logs are generated.
+type Options struct {
+	// Depth biases the assumed number of call frames to the "true" caller.
+	// This is useful when the calling code calls a function which then calls
+	// stdr (e.g. a logging shim to another API).  Values less than zero will
+	// be treated as zero.
+	Depth int
+
+	// LogCaller tells stdr to add a "caller" key to some or all log lines.
+	// Go's log package has options to log this natively, too.
+	LogCaller MessageClass
+
+	// TODO: add an option to log the date/time
+}
+
+// MessageClass indicates which category or categories of messages to consider.
+type MessageClass int
+
+const (
+	// None ignores all message classes.
+	None MessageClass = iota
+	// All considers all message classes.
+	All
+	// Info only considers info messages.
+	Info
+	// Error only considers error messages.
+	Error
+)
+
+// StdLogger is the subset of the Go stdlib log.Logger API that is needed for
+// this adapter.
+type StdLogger interface {
+	// Output is the same as log.Output and log.Logger.Output.
+	Output(calldepth int, logline string) error
+}
+
+type logger struct {
+	funcr.Formatter
+	std StdLogger
+}
+
+var _ logr.LogSink = &logger{}
+var _ logr.CallDepthLogSink = &logger{}
+
+func (l logger) Enabled(level int) bool {
+	return globalVerbosity >= level
+}
+
+func (l logger) Info(level int, msg string, kvList ...interface{}) {
+	prefix, args := l.FormatInfo(level, msg, kvList)
+	if prefix != "" {
+		args = prefix + ": " + args
+	}
+	_ = l.std.Output(l.Formatter.GetDepth()+1, args)
+}
+
+func (l logger) Error(err error, msg string, kvList ...interface{}) {
+	prefix, args := l.FormatError(err, msg, kvList)
+	if prefix != "" {
+		args = prefix + ": " + args
+	}
+	_ = l.std.Output(l.Formatter.GetDepth()+1, args)
+}
+
+func (l logger) WithName(name string) logr.LogSink {
+	l.Formatter.AddName(name)
+	return &l
+}
+
+func (l logger) WithValues(kvList ...interface{}) logr.LogSink {
+	l.Formatter.AddValues(kvList)
+	return &l
+}
+
+func (l logger) WithCallDepth(depth int) logr.LogSink {
+	l.Formatter.AddCallDepth(depth)
+	return &l
+}
+
+// Underlier exposes access to the underlying logging implementation.  Since
+// callers only have a logr.Logger, they have to know which implementation is
+// in use, so this interface is less of an abstraction and more of a way to
+// test type conversion.
+type Underlier interface {
+	GetUnderlying() StdLogger
+}
+
+// GetUnderlying returns the StdLogger underneath this logger.  Since StdLogger
+// is itself an interface, the result may or may not be a Go log.Logger.
+func (l logger) GetUnderlying() StdLogger {
+	return l.std
+}
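
stdr enters the vendor tree as a new indirect dependency; it adapts a standard library *log.Logger to the logr API. A short usage sketch of the code above (a hypothetical program, not part of this diff):

	package main

	import (
		"log"
		"os"

		"github.com/go-logr/stdr"
	)

	func main() {
		stdr.SetVerbosity(1) // let logger.V(1) messages through the global filter

		logger := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
		logger.Info("starting", "port", 8080)
		logger.V(1).Info("debug detail") // enabled because global verbosity is 1
	}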
diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md
index 7ed347d3ad737590508500efd4b8d15cc8dbed7a..c9fb829dc64f4629f3f7b0efd249ebfd9ec0cb6a 100644
--- a/vendor/github.com/google/uuid/CHANGELOG.md
+++ b/vendor/github.com/google/uuid/CHANGELOG.md
@@ -1,5 +1,12 @@
 # Changelog
 
+## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
+
+
+### Features
+
+* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))
+
 ## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
 
 
diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go
index e6ef06cdc87aab2c0f71612088cd9b05c4552360..c351129279f32127baf9415e3fdb018ac55f9760 100644
--- a/vendor/github.com/google/uuid/time.go
+++ b/vendor/github.com/google/uuid/time.go
@@ -108,12 +108,23 @@ func setClockSequence(seq int) {
 }
 
 // Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
-// uuid.  The time is only defined for version 1 and 2 UUIDs.
+// uuid.  The time is only defined for version 1, 2, 6 and 7 UUIDs.
 func (uuid UUID) Time() Time {
-	time := int64(binary.BigEndian.Uint32(uuid[0:4]))
-	time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
-	time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
-	return Time(time)
+	var t Time
+	switch uuid.Version() {
+	case 6:
+		time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
+		t = Time(time)
+	case 7:
+		time := binary.BigEndian.Uint64(uuid[:8])
+		t = Time((time>>16)*10000 + g1582ns100)
+	default: // forward compatible
+		time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+		time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+		time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+		t = Time(time)
+	}
+	return t
 }
 
 // ClockSequence returns the clock sequence encoded in uuid.
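
The new v7 branch of Time undoes the encoding performed by makeV7 (added in version7.go below): the top 48 bits of the first eight bytes hold milliseconds since the Unix epoch, so the code drops the low 16 bits (version plus rand_a), scales to 100ns ticks, and rebases onto the Gregorian epoch. A sketch of the arithmetic, where g1582ns100 = 122192928000000000 is the number of 100ns intervals from 15 Oct 1582 to 1 Jan 1970:

	raw := binary.BigEndian.Uint64(u[:8])  // unix_ts_ms(48) | ver(4) | rand_a(12)
	ms := raw >> 16                        // milliseconds since the Unix epoch
	ticks := ms*10000 + 122192928000000000 // 100ns ticks since 15 Oct 1582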
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go
index dc75f7d990919da40fdc63e7d17514e08efbc649..5232b486780d605d2c6bcc0b268bf93ba4e309c8 100644
--- a/vendor/github.com/google/uuid/uuid.go
+++ b/vendor/github.com/google/uuid/uuid.go
@@ -186,6 +186,59 @@ func Must(uuid UUID, err error) UUID {
 	return uuid
 }
 
+// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
+//   xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+//   urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+//   xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+//   {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+func Validate(s string) error {
+	switch len(s) {
+	// Standard UUID format
+	case 36:
+
+	// UUID with "urn:uuid:" prefix
+	case 36 + 9:
+		if !strings.EqualFold(s[:9], "urn:uuid:") {
+			return fmt.Errorf("invalid urn prefix: %q", s[:9])
+		}
+		s = s[9:]
+
+	// UUID enclosed in braces
+	case 36 + 2:
+		if s[0] != '{' || s[len(s)-1] != '}' {
+			return fmt.Errorf("invalid bracketed UUID format")
+		}
+		s = s[1 : len(s)-1]
+
+	// UUID without hyphens
+	case 32:
+		for i := 0; i < len(s); i += 2 {
+			_, ok := xtob(s[i], s[i+1])
+			if !ok {
+				return errors.New("invalid UUID format")
+			}
+		}
+
+	default:
+		return invalidLengthError{len(s)}
+	}
+
+	// Check for standard UUID format
+	if len(s) == 36 {
+		if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+			return errors.New("invalid UUID format")
+		}
+		for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
+			if _, ok := xtob(s[x], s[x+1]); !ok {
+				return errors.New("invalid UUID format")
+			}
+		}
+	}
+
+	return nil
+}
+
 // String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
 // , or "" if uuid is invalid.
 func (uuid UUID) String() string {
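
Validate is the headline addition of uuid v1.5.0 (see the changelog hunk above): it checks all four accepted spellings without allocating a UUID value. A short sketch, assuming v1.5.0:

	for _, s := range []string{
		"f47ac10b-58cc-0372-8567-0e02b2c3d479",          // canonical
		"urn:uuid:f47ac10b-58cc-0372-8567-0e02b2c3d479", // URN form
		"{f47ac10b-58cc-0372-8567-0e02b2c3d479}",        // braced
		"f47ac10b58cc037285670e02b2c3d479",              // no hyphens
		"not-a-uuid",
	} {
		fmt.Println(s, "=>", uuid.Validate(s))
	}
	// The first four print <nil>; the last reports an invalid length.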
diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go
new file mode 100644
index 0000000000000000000000000000000000000000..339a959a7a2629181683466d3bef1edc83c9db28
--- /dev/null
+++ b/vendor/github.com/google/uuid/version6.go
@@ -0,0 +1,56 @@
+// Copyright 2023 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "encoding/binary"
+
+// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
+// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
+// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
+//
+// NewV6 returns a Version 6 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set, NewV6 falls back to a random NodeID automatically. If the clock
+// sequence has not been set by SetClockSequence then it will be set
+// automatically. If GetTime fails to return the current time, NewV6 returns
+// Nil and an error.
+func NewV6() (UUID, error) {
+	var uuid UUID
+	now, seq, err := GetTime()
+	if err != nil {
+		return uuid, err
+	}
+
+	/*
+	    0                   1                   2                   3
+	    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |                           time_high                           |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |           time_mid            |      time_low_and_version     |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |clk_seq_hi_res |  clk_seq_low  |         node (0-1)            |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |                         node (2-5)                            |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	*/
+
+	binary.BigEndian.PutUint64(uuid[0:], uint64(now))
+	binary.BigEndian.PutUint16(uuid[8:], seq)
+
+	uuid[6] = 0x60 | (uuid[6] & 0x0F)
+	uuid[8] = 0x80 | (uuid[8] & 0x3F)
+
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	copy(uuid[10:], nodeID[:])
+	nodeMu.Unlock()
+
+	return uuid, nil
+}
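
A brief sketch of NewV6 in use, assuming google/uuid v1.5.0. Because the timestamp now leads the byte layout, successive v6 values typically sort lexically in creation order (barring clock-sequence wraparound):

	a, _ := uuid.NewV6()
	b, _ := uuid.NewV6()
	fmt.Println(a.Version(), a.Variant()) // VERSION_6 RFC4122
	fmt.Println(a.String() < b.String())  // true: v6 IDs are time-ordered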
diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go
new file mode 100644
index 0000000000000000000000000000000000000000..ba9dd5eb689b9bd3f2e547ed19b79655eac20822
--- /dev/null
+++ b/vendor/github.com/google/uuid/version7.go
@@ -0,0 +1,75 @@
+// Copyright 2023 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"io"
+)
+
+// UUID version 7 features a time-ordered value field derived from the widely
+// implemented and well known Unix Epoch timestamp source: the number of
+// milliseconds since midnight 1 Jan 1970 UTC, leap seconds excluded. It also
+// has improved entropy characteristics over versions 1 and 6.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
+//
+// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible.
+//
+// NewV7 returns a Version 7 UUID based on the current time (Unix Epoch).
+// It uses the randomness pool if it was enabled with EnableRandPool.
+// On error, NewV7 returns Nil and an error.
+func NewV7() (UUID, error) {
+	uuid, err := NewRandom()
+	if err != nil {
+		return uuid, err
+	}
+	makeV7(uuid[:])
+	return uuid, nil
+}
+
+// NewV7FromReader returns a Version 7 UUID based on the current time (Unix Epoch).
+// It uses NewRandomFromReader to fill the random bits.
+// On error, NewV7FromReader returns Nil and an error.
+func NewV7FromReader(r io.Reader) (UUID, error) {
+	uuid, err := NewRandomFromReader(r)
+	if err != nil {
+		return uuid, err
+	}
+
+	makeV7(uuid[:])
+	return uuid, nil
+}
+
+// makeV7 fills in the 48-bit timestamp (uuid[0] - uuid[5]) and sets the
+// version to b0111 (uuid[6]); uuid[8] already has the correct variant bits
+// (10). See NewV7 and NewV7FromReader.
+func makeV7(uuid []byte) {
+	/*
+		 0                   1                   2                   3
+		 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|                           unix_ts_ms                          |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|          unix_ts_ms           |  ver  |       rand_a          |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|var|                        rand_b                             |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|                            rand_b                             |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	*/
+	_ = uuid[15] // bounds check
+
+	t := timeNow().UnixMilli()
+
+	uuid[0] = byte(t >> 40)
+	uuid[1] = byte(t >> 32)
+	uuid[2] = byte(t >> 24)
+	uuid[3] = byte(t >> 16)
+	uuid[4] = byte(t >> 8)
+	uuid[5] = byte(t)
+
+	uuid[6] = 0x70 | (uuid[6] & 0x0F)
+	// uuid[8] already has the right variant bits
+}
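
NewV7 builds on NewRandom and then overwrites the first six bytes with the millisecond timestamp, so the creation time can be recovered via the Time method extended in time.go above. A sketch, assuming v1.5.0:

	u, err := uuid.NewV7()
	if err != nil {
		log.Fatal(err)
	}
	sec, nsec := u.Time().UnixTime()
	fmt.Println(time.Unix(sec, nsec).UTC()) // ≈ time.Now(), at millisecond precision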
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..364516251b93b3fc05ccbe0b03098b274c6f75bf
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2015, Gengo, Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    * Neither the name of Gengo, Inc. nor the names of its
+      contributors may be used to endorse or promote products derived from this
+      software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel
new file mode 100644
index 0000000000000000000000000000000000000000..b8fbb2b77c4019011e82efcd8fa250264c1403c3
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel
@@ -0,0 +1,35 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:public"])
+
+go_library(
+    name = "httprule",
+    srcs = [
+        "compile.go",
+        "parse.go",
+        "types.go",
+    ],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule",
+    deps = ["//utilities"],
+)
+
+go_test(
+    name = "httprule_test",
+    size = "small",
+    srcs = [
+        "compile_test.go",
+        "parse_test.go",
+        "types_test.go",
+    ],
+    embed = [":httprule"],
+    deps = [
+        "//utilities",
+        "@org_golang_google_grpc//grpclog",
+    ],
+)
+
+alias(
+    name = "go_default_library",
+    actual = ":httprule",
+    visibility = ["//:__subpackages__"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go
new file mode 100644
index 0000000000000000000000000000000000000000..3cd9372959d3673dd06f4276c2fab8319954236b
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go
@@ -0,0 +1,121 @@
+package httprule
+
+import (
+	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+)
+
+const (
+	opcodeVersion = 1
+)
+
+// Template is a compiled representation of path templates.
+type Template struct {
+	// Version is the version number of the format.
+	Version int
+	// OpCodes is a sequence of operations.
+	OpCodes []int
+	// Pool is a constant pool
+	Pool []string
+	// Verb is a VERB part in the template.
+	Verb string
+	// Fields is a list of field paths bound in this template.
+	Fields []string
+	// Original template (example: /v1/a_bit_of_everything)
+	Template string
+}
+
+// Compiler compiles a parsed representation of a path template into
+// marshallable operations. They can be unmarshalled by runtime.NewPattern.
+type Compiler interface {
+	Compile() Template
+}
+
+type op struct {
+	// code is the opcode of the operation
+	code utilities.OpCode
+
+	// str is a string operand of the code.
+	// num is ignored if str is not empty.
+	str string
+
+	// num is a numeric operand of the code.
+	num int
+}
+
+func (w wildcard) compile() []op {
+	return []op{
+		{code: utilities.OpPush},
+	}
+}
+
+func (w deepWildcard) compile() []op {
+	return []op{
+		{code: utilities.OpPushM},
+	}
+}
+
+func (l literal) compile() []op {
+	return []op{
+		{
+			code: utilities.OpLitPush,
+			str:  string(l),
+		},
+	}
+}
+
+func (v variable) compile() []op {
+	var ops []op
+	for _, s := range v.segments {
+		ops = append(ops, s.compile()...)
+	}
+	ops = append(ops, op{
+		code: utilities.OpConcatN,
+		num:  len(v.segments),
+	}, op{
+		code: utilities.OpCapture,
+		str:  v.path,
+	})
+
+	return ops
+}
+
+func (t template) Compile() Template {
+	var rawOps []op
+	for _, s := range t.segments {
+		rawOps = append(rawOps, s.compile()...)
+	}
+
+	var (
+		ops    []int
+		pool   []string
+		fields []string
+	)
+	consts := make(map[string]int)
+	for _, op := range rawOps {
+		ops = append(ops, int(op.code))
+		if op.str == "" {
+			ops = append(ops, op.num)
+		} else {
+			// eof segment literal represents the "/" path pattern
+			if op.str == eof {
+				op.str = ""
+			}
+			if _, ok := consts[op.str]; !ok {
+				consts[op.str] = len(pool)
+				pool = append(pool, op.str)
+			}
+			ops = append(ops, consts[op.str])
+		}
+		if op.code == utilities.OpCapture {
+			fields = append(fields, op.str)
+		}
+	}
+	return Template{
+		Version:  opcodeVersion,
+		OpCodes:  ops,
+		Pool:     pool,
+		Verb:     t.verb,
+		Fields:   fields,
+		Template: t.template,
+	}
+}
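
To make the opcode encoding concrete, here is a hedged walk-through of what Compile produces for the template /v1/{name=books/*}, using the opcode values from the utilities package (OpPush=1, OpLitPush=2, OpConcatN=4, OpCapture=5):

	// segments: literal "v1", variable{path: "name", segments: [literal "books", wildcard]}
	// raw ops:  LitPush("v1"), LitPush("books"), Push, ConcatN(2), Capture("name")
	// encoded as (opcode, operand) pairs, with string operands as pool indices:
	//   OpCodes = [2 0  2 1  1 0  4 2  5 2]
	//   Pool    = ["v1", "books", "name"]
	//   Fields  = ["name"]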
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go
new file mode 100644
index 0000000000000000000000000000000000000000..c056bd3058ade4145f3069b9feed1487af72df75
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go
@@ -0,0 +1,11 @@
+//go:build gofuzz
+// +build gofuzz
+
+package httprule
+
+func Fuzz(data []byte) int {
+	if _, err := Parse(string(data)); err != nil {
+		return 0
+	}
+	return 0
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
new file mode 100644
index 0000000000000000000000000000000000000000..65ffcf5cf871d7c7c20dd9ea852309182f2d80b1
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
@@ -0,0 +1,368 @@
+package httprule
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+// InvalidTemplateError indicates that the path template is not valid.
+type InvalidTemplateError struct {
+	tmpl string
+	msg  string
+}
+
+func (e InvalidTemplateError) Error() string {
+	return fmt.Sprintf("%s: %s", e.msg, e.tmpl)
+}
+
+// Parse parses the string representation of a path template.
+func Parse(tmpl string) (Compiler, error) {
+	if !strings.HasPrefix(tmpl, "/") {
+		return template{}, InvalidTemplateError{tmpl: tmpl, msg: "no leading /"}
+	}
+	tokens, verb := tokenize(tmpl[1:])
+
+	p := parser{tokens: tokens}
+	segs, err := p.topLevelSegments()
+	if err != nil {
+		return template{}, InvalidTemplateError{tmpl: tmpl, msg: err.Error()}
+	}
+
+	return template{
+		segments: segs,
+		verb:     verb,
+		template: tmpl,
+	}, nil
+}
+
+func tokenize(path string) (tokens []string, verb string) {
+	if path == "" {
+		return []string{eof}, ""
+	}
+
+	const (
+		init = iota
+		field
+		nested
+	)
+	st := init
+	for path != "" {
+		var idx int
+		switch st {
+		case init:
+			idx = strings.IndexAny(path, "/{")
+		case field:
+			idx = strings.IndexAny(path, ".=}")
+		case nested:
+			idx = strings.IndexAny(path, "/}")
+		}
+		if idx < 0 {
+			tokens = append(tokens, path)
+			break
+		}
+		switch r := path[idx]; r {
+		case '/', '.':
+		case '{':
+			st = field
+		case '=':
+			st = nested
+		case '}':
+			st = init
+		}
+		if idx == 0 {
+			tokens = append(tokens, path[idx:idx+1])
+		} else {
+			tokens = append(tokens, path[:idx], path[idx:idx+1])
+		}
+		path = path[idx+1:]
+	}
+
+	l := len(tokens)
+	// See
+	// https://github.com/grpc-ecosystem/grpc-gateway/pull/1947#issuecomment-774523693 ;
+	// although normal and backwards-compat logic here is to use the last index
+	// of a colon, if the final segment is a variable followed by a colon, the
+	// part following the colon must be a verb. Hence if the previous token is
+	// an end var marker, we switch the index we're looking for to Index instead
+	// of LastIndex, so that we correctly grab the remaining part of the path as
+	// the verb.
+	var penultimateTokenIsEndVar bool
+	switch l {
+	case 0, 1:
+		// Not enough to be variable so skip this logic and don't result in an
+		// invalid index
+	default:
+		penultimateTokenIsEndVar = tokens[l-2] == "}"
+	}
+	t := tokens[l-1]
+	var idx int
+	if penultimateTokenIsEndVar {
+		idx = strings.Index(t, ":")
+	} else {
+		idx = strings.LastIndex(t, ":")
+	}
+	if idx == 0 {
+		tokens, verb = tokens[:l-1], t[1:]
+	} else if idx > 0 {
+		tokens[l-1], verb = t[:idx], t[idx+1:]
+	}
+	tokens = append(tokens, eof)
+	return tokens, verb
+}
+
+// parser is a parser of the template syntax defined in github.com/googleapis/googleapis/google/api/http.proto.
+type parser struct {
+	tokens   []string
+	accepted []string
+}
+
+// topLevelSegments is the target of this parser.
+func (p *parser) topLevelSegments() ([]segment, error) {
+	if _, err := p.accept(typeEOF); err == nil {
+		p.tokens = p.tokens[:0]
+		return []segment{literal(eof)}, nil
+	}
+	segs, err := p.segments()
+	if err != nil {
+		return nil, err
+	}
+	if _, err := p.accept(typeEOF); err != nil {
+		return nil, fmt.Errorf("unexpected token %q after segments %q", p.tokens[0], strings.Join(p.accepted, ""))
+	}
+	return segs, nil
+}
+
+func (p *parser) segments() ([]segment, error) {
+	s, err := p.segment()
+	if err != nil {
+		return nil, err
+	}
+
+	segs := []segment{s}
+	for {
+		if _, err := p.accept("/"); err != nil {
+			return segs, nil
+		}
+		s, err := p.segment()
+		if err != nil {
+			return segs, err
+		}
+		segs = append(segs, s)
+	}
+}
+
+func (p *parser) segment() (segment, error) {
+	if _, err := p.accept("*"); err == nil {
+		return wildcard{}, nil
+	}
+	if _, err := p.accept("**"); err == nil {
+		return deepWildcard{}, nil
+	}
+	if l, err := p.literal(); err == nil {
+		return l, nil
+	}
+
+	v, err := p.variable()
+	if err != nil {
+		return nil, fmt.Errorf("segment neither wildcards, literal or variable: %w", err)
+	}
+	return v, nil
+}
+
+func (p *parser) literal() (segment, error) {
+	lit, err := p.accept(typeLiteral)
+	if err != nil {
+		return nil, err
+	}
+	return literal(lit), nil
+}
+
+func (p *parser) variable() (segment, error) {
+	if _, err := p.accept("{"); err != nil {
+		return nil, err
+	}
+
+	path, err := p.fieldPath()
+	if err != nil {
+		return nil, err
+	}
+
+	var segs []segment
+	if _, err := p.accept("="); err == nil {
+		segs, err = p.segments()
+		if err != nil {
+			return nil, fmt.Errorf("invalid segment in variable %q: %w", path, err)
+		}
+	} else {
+		segs = []segment{wildcard{}}
+	}
+
+	if _, err := p.accept("}"); err != nil {
+		return nil, fmt.Errorf("unterminated variable segment: %s", path)
+	}
+	return variable{
+		path:     path,
+		segments: segs,
+	}, nil
+}
+
+func (p *parser) fieldPath() (string, error) {
+	c, err := p.accept(typeIdent)
+	if err != nil {
+		return "", err
+	}
+	components := []string{c}
+	for {
+		if _, err := p.accept("."); err != nil {
+			return strings.Join(components, "."), nil
+		}
+		c, err := p.accept(typeIdent)
+		if err != nil {
+			return "", fmt.Errorf("invalid field path component: %w", err)
+		}
+		components = append(components, c)
+	}
+}
+
+// A termType is a type of terminal symbols.
+type termType string
+
+// These constants define some of the valid values of termType.
+// They improve readability of parse functions.
+//
+// You can also use "/", "*", "**", "." or "=" as valid values.
+const (
+	typeIdent   = termType("ident")
+	typeLiteral = termType("literal")
+	typeEOF     = termType("$")
+)
+
+// eof is the terminal symbol which always appears at the end of token sequence.
+const eof = "\u0000"
+
+// accept tries to accept a token in "p".
+// This function consumes a token and returns it if it matches the specified "term".
+// If it doesn't match, the function does not consume any tokens and returns an error.
+func (p *parser) accept(term termType) (string, error) {
+	t := p.tokens[0]
+	switch term {
+	case "/", "*", "**", ".", "=", "{", "}":
+		if t != string(term) && t != "/" {
+			return "", fmt.Errorf("expected %q but got %q", term, t)
+		}
+	case typeEOF:
+		if t != eof {
+			return "", fmt.Errorf("expected EOF but got %q", t)
+		}
+	case typeIdent:
+		if err := expectIdent(t); err != nil {
+			return "", err
+		}
+	case typeLiteral:
+		if err := expectPChars(t); err != nil {
+			return "", err
+		}
+	default:
+		return "", fmt.Errorf("unknown termType %q", term)
+	}
+	p.tokens = p.tokens[1:]
+	p.accepted = append(p.accepted, t)
+	return t, nil
+}
+
+// expectPChars determines if "t" consists of only pchars defined in RFC3986.
+//
+// https://www.ietf.org/rfc/rfc3986.txt, P.49
+//
+//	pchar         = unreserved / pct-encoded / sub-delims / ":" / "@"
+//	unreserved    = ALPHA / DIGIT / "-" / "." / "_" / "~"
+//	sub-delims    = "!" / "$" / "&" / "'" / "(" / ")"
+//	              / "*" / "+" / "," / ";" / "="
+//	pct-encoded   = "%" HEXDIG HEXDIG
+func expectPChars(t string) error {
+	const (
+		init = iota
+		pct1
+		pct2
+	)
+	st := init
+	for _, r := range t {
+		if st != init {
+			if !isHexDigit(r) {
+				return fmt.Errorf("invalid hexdigit: %c(%U)", r, r)
+			}
+			switch st {
+			case pct1:
+				st = pct2
+			case pct2:
+				st = init
+			}
+			continue
+		}
+
+		// unreserved
+		switch {
+		case 'A' <= r && r <= 'Z':
+			continue
+		case 'a' <= r && r <= 'z':
+			continue
+		case '0' <= r && r <= '9':
+			continue
+		}
+		switch r {
+		case '-', '.', '_', '~':
+			// unreserved
+		case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=':
+			// sub-delims
+		case ':', '@':
+			// rest of pchar
+		case '%':
+			// pct-encoded
+			st = pct1
+		default:
+			return fmt.Errorf("invalid character in path segment: %q(%U)", r, r)
+		}
+	}
+	if st != init {
+		return fmt.Errorf("invalid percent-encoding in %q", t)
+	}
+	return nil
+}
+
+// expectIdent determines if "ident" is a valid identifier in .proto schema ([[:alpha:]_][[:alphanum:]_]*).
+func expectIdent(ident string) error {
+	if ident == "" {
+		return errors.New("empty identifier")
+	}
+	for pos, r := range ident {
+		switch {
+		case '0' <= r && r <= '9':
+			if pos == 0 {
+				return fmt.Errorf("identifier starting with digit: %s", ident)
+			}
+			continue
+		case 'A' <= r && r <= 'Z':
+			continue
+		case 'a' <= r && r <= 'z':
+			continue
+		case r == '_':
+			continue
+		default:
+			return fmt.Errorf("invalid character %q(%U) in identifier: %s", r, r, ident)
+		}
+	}
+	return nil
+}
+
+func isHexDigit(r rune) bool {
+	switch {
+	case '0' <= r && r <= '9':
+		return true
+	case 'A' <= r && r <= 'F':
+		return true
+	case 'a' <= r && r <= 'f':
+		return true
+	}
+	return false
+}
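
Since httprule is internal to grpc-gateway (generated gateway code is its only consumer), the following sketch is illustrative rather than importable:

	tmpl, err := httprule.Parse("/v1/{name=projects/*/books/*}:read")
	if err != nil {
		// a missing leading slash or a syntax problem surfaces as InvalidTemplateError
	}
	compiled := tmpl.Compile()
	fmt.Println(compiled.Verb)   // "read"
	fmt.Println(compiled.Fields) // ["name"]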
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go
new file mode 100644
index 0000000000000000000000000000000000000000..5a814a0004cec5ceca84b96e0be333a372df222d
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go
@@ -0,0 +1,60 @@
+package httprule
+
+import (
+	"fmt"
+	"strings"
+)
+
+type template struct {
+	segments []segment
+	verb     string
+	template string
+}
+
+type segment interface {
+	fmt.Stringer
+	compile() (ops []op)
+}
+
+type wildcard struct{}
+
+type deepWildcard struct{}
+
+type literal string
+
+type variable struct {
+	path     string
+	segments []segment
+}
+
+func (wildcard) String() string {
+	return "*"
+}
+
+func (deepWildcard) String() string {
+	return "**"
+}
+
+func (l literal) String() string {
+	return string(l)
+}
+
+func (v variable) String() string {
+	var segs []string
+	for _, s := range v.segments {
+		segs = append(segs, s.String())
+	}
+	return fmt.Sprintf("{%s=%s}", v.path, strings.Join(segs, "/"))
+}
+
+func (t template) String() string {
+	var segs []string
+	for _, s := range t.segments {
+		segs = append(segs, s.String())
+	}
+	str := strings.Join(segs, "/")
+	if t.verb != "" {
+		str = fmt.Sprintf("%s:%s", str, t.verb)
+	}
+	return "/" + str
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
new file mode 100644
index 0000000000000000000000000000000000000000..78d7c9f5c883b5426e8dbbba3255df3d115bd798
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
@@ -0,0 +1,97 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:public"])
+
+go_library(
+    name = "runtime",
+    srcs = [
+        "context.go",
+        "convert.go",
+        "doc.go",
+        "errors.go",
+        "fieldmask.go",
+        "handler.go",
+        "marshal_httpbodyproto.go",
+        "marshal_json.go",
+        "marshal_jsonpb.go",
+        "marshal_proto.go",
+        "marshaler.go",
+        "marshaler_registry.go",
+        "mux.go",
+        "pattern.go",
+        "proto2_convert.go",
+        "query.go",
+    ],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/runtime",
+    deps = [
+        "//internal/httprule",
+        "//utilities",
+        "@org_golang_google_genproto_googleapis_api//httpbody",
+        "@org_golang_google_grpc//codes",
+        "@org_golang_google_grpc//grpclog",
+        "@org_golang_google_grpc//health/grpc_health_v1",
+        "@org_golang_google_grpc//metadata",
+        "@org_golang_google_grpc//status",
+        "@org_golang_google_protobuf//encoding/protojson",
+        "@org_golang_google_protobuf//proto",
+        "@org_golang_google_protobuf//reflect/protoreflect",
+        "@org_golang_google_protobuf//reflect/protoregistry",
+        "@org_golang_google_protobuf//types/known/durationpb",
+        "@org_golang_google_protobuf//types/known/fieldmaskpb",
+        "@org_golang_google_protobuf//types/known/structpb",
+        "@org_golang_google_protobuf//types/known/timestamppb",
+        "@org_golang_google_protobuf//types/known/wrapperspb",
+    ],
+)
+
+go_test(
+    name = "runtime_test",
+    size = "small",
+    srcs = [
+        "context_test.go",
+        "convert_test.go",
+        "errors_test.go",
+        "fieldmask_test.go",
+        "handler_test.go",
+        "marshal_httpbodyproto_test.go",
+        "marshal_json_test.go",
+        "marshal_jsonpb_test.go",
+        "marshal_proto_test.go",
+        "marshaler_registry_test.go",
+        "mux_internal_test.go",
+        "mux_test.go",
+        "pattern_test.go",
+        "query_fuzz_test.go",
+        "query_test.go",
+    ],
+    embed = [":runtime"],
+    deps = [
+        "//runtime/internal/examplepb",
+        "//utilities",
+        "@com_github_google_go_cmp//cmp",
+        "@com_github_google_go_cmp//cmp/cmpopts",
+        "@org_golang_google_genproto_googleapis_api//httpbody",
+        "@org_golang_google_genproto_googleapis_rpc//errdetails",
+        "@org_golang_google_genproto_googleapis_rpc//status",
+        "@org_golang_google_grpc//:go_default_library",
+        "@org_golang_google_grpc//codes",
+        "@org_golang_google_grpc//health/grpc_health_v1",
+        "@org_golang_google_grpc//metadata",
+        "@org_golang_google_grpc//status",
+        "@org_golang_google_protobuf//encoding/protojson",
+        "@org_golang_google_protobuf//proto",
+        "@org_golang_google_protobuf//testing/protocmp",
+        "@org_golang_google_protobuf//types/known/durationpb",
+        "@org_golang_google_protobuf//types/known/emptypb",
+        "@org_golang_google_protobuf//types/known/fieldmaskpb",
+        "@org_golang_google_protobuf//types/known/structpb",
+        "@org_golang_google_protobuf//types/known/timestamppb",
+        "@org_golang_google_protobuf//types/known/wrapperspb",
+    ],
+)
+
+alias(
+    name = "go_default_library",
+    actual = ":runtime",
+    visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
new file mode 100644
index 0000000000000000000000000000000000000000..31553e7848a92b5059663f5acfa99bbfd90a05a4
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
@@ -0,0 +1,401 @@
+package runtime
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"net"
+	"net/http"
+	"net/textproto"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+// MetadataHeaderPrefix is the http prefix that represents custom metadata
+// parameters to or from a gRPC call.
+const MetadataHeaderPrefix = "Grpc-Metadata-"
+
+// MetadataPrefix is prepended to permanent HTTP header keys (as specified
+// by the IANA) when added to the gRPC context.
+const MetadataPrefix = "grpcgateway-"
+
+// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to
+// HTTP headers in a response handled by grpc-gateway.
+const MetadataTrailerPrefix = "Grpc-Trailer-"
+
+const metadataGrpcTimeout = "Grpc-Timeout"
+const metadataHeaderBinarySuffix = "-Bin"
+
+const xForwardedFor = "X-Forwarded-For"
+const xForwardedHost = "X-Forwarded-Host"
+
+// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
+// header isn't present. If the value is 0 the sent `context` will not have a timeout.
+var DefaultContextTimeout = 0 * time.Second
+
+// malformedHTTPHeaders lists the headers that the gRPC server may reject outright as malformed.
+// See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more context.
+var malformedHTTPHeaders = map[string]struct{}{
+	"connection": {},
+}
+
+type (
+	rpcMethodKey       struct{}
+	httpPathPatternKey struct{}
+
+	AnnotateContextOption func(ctx context.Context) context.Context
+)
+
+func WithHTTPPathPattern(pattern string) AnnotateContextOption {
+	return func(ctx context.Context) context.Context {
+		return withHTTPPathPattern(ctx, pattern)
+	}
+}
+
+func decodeBinHeader(v string) ([]byte, error) {
+	if len(v)%4 == 0 {
+		// Input was padded, or padding was not necessary.
+		return base64.StdEncoding.DecodeString(v)
+	}
+	return base64.RawStdEncoding.DecodeString(v)
+}
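+
+// Example: gRPC transmits "-Bin" metadata base64-encoded. A header value
+// of "aGVsbG8" (length 7, not a multiple of 4, hence unpadded) takes the
+// RawStdEncoding branch and decodes to []byte("hello"), while the padded
+// form "aGVsbG8=" (length 8) takes the StdEncoding branch.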
+
+/*
+AnnotateContext adds context information such as metadata from the request.
+
+At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For",
+except that the forwarded destination is not another HTTP service but rather
+a gRPC service.
+*/
+func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) {
+	ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...)
+	if err != nil {
+		return nil, err
+	}
+	if md == nil {
+		return ctx, nil
+	}
+
+	return metadata.NewOutgoingContext(ctx, md), nil
+}
+
+// AnnotateIncomingContext adds context information such as metadata from the request.
+// Attach metadata as incoming context.
+func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) {
+	ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...)
+	if err != nil {
+		return nil, err
+	}
+	if md == nil {
+		return ctx, nil
+	}
+
+	return metadata.NewIncomingContext(ctx, md), nil
+}
+
+func isValidGRPCMetadataKey(key string) bool {
+	// Must be a valid gRPC "Header-Name" as defined here:
+	//   https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md
+	// This means 0-9 a-z _ - .
+	// Only lowercase letters are valid in the wire protocol, but the client library will normalize
+	// uppercase ASCII to lowercase, so uppercase ASCII is also acceptable.
+	bytes := []byte(key) // gRPC validates strings on the byte level, not Unicode.
+	for _, ch := range bytes {
+		validLowercaseLetter := ch >= 'a' && ch <= 'z'
+		validUppercaseLetter := ch >= 'A' && ch <= 'Z'
+		validDigit := ch >= '0' && ch <= '9'
+		validOther := ch == '.' || ch == '-' || ch == '_'
+		if !validLowercaseLetter && !validUppercaseLetter && !validDigit && !validOther {
+			return false
+		}
+	}
+	return true
+}
+
+func isValidGRPCMetadataTextValue(textValue string) bool {
+	// Must be a valid gRPC "ASCII-Value" as defined here:
+	//   https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md
+	// This means printable ASCII, including spaces; 0x20 to 0x7E inclusive.
+	bytes := []byte(textValue) // gRPC validates strings on the byte level, not Unicode.
+	for _, ch := range bytes {
+		if ch < 0x20 || ch > 0x7E {
+			return false
+		}
+	}
+	return true
+}
+
+func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, metadata.MD, error) {
+	ctx = withRPCMethod(ctx, rpcMethodName)
+	for _, o := range options {
+		ctx = o(ctx)
+	}
+	timeout := DefaultContextTimeout
+	if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
+		var err error
+		timeout, err = timeoutDecode(tm)
+		if err != nil {
+			return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
+		}
+	}
+	var pairs []string
+	for key, vals := range req.Header {
+		key = textproto.CanonicalMIMEHeaderKey(key)
+		for _, val := range vals {
+			// For backwards-compatibility, pass through 'authorization' header with no prefix.
+			if key == "Authorization" {
+				pairs = append(pairs, "authorization", val)
+			}
+			if h, ok := mux.incomingHeaderMatcher(key); ok {
+				if !isValidGRPCMetadataKey(h) {
+					grpclog.Errorf("HTTP header name %q is not valid as gRPC metadata key; skipping", h)
+					continue
+				}
+				// Handles "-bin" metadata in grpc, since grpc will do another base64
+				// encode before sending to server, we need to decode it first.
+				if strings.HasSuffix(key, metadataHeaderBinarySuffix) {
+					b, err := decodeBinHeader(val)
+					if err != nil {
+						return nil, nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err)
+					}
+
+					val = string(b)
+				} else if !isValidGRPCMetadataTextValue(val) {
+					grpclog.Errorf("Value of HTTP header %q contains non-ASCII value (not valid as gRPC metadata): skipping", h)
+					continue
+				}
+				pairs = append(pairs, h, val)
+			}
+		}
+	}
+	if host := req.Header.Get(xForwardedHost); host != "" {
+		pairs = append(pairs, strings.ToLower(xForwardedHost), host)
+	} else if req.Host != "" {
+		pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host)
+	}
+
+	if addr := req.RemoteAddr; addr != "" {
+		if remoteIP, _, err := net.SplitHostPort(addr); err == nil {
+			if fwd := req.Header.Get(xForwardedFor); fwd == "" {
+				pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP)
+			} else {
+				pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP))
+			}
+		}
+	}
+
+	if timeout != 0 {
+		//nolint:govet  // The context outlives this function
+		ctx, _ = context.WithTimeout(ctx, timeout)
+	}
+	if len(pairs) == 0 {
+		return ctx, nil, nil
+	}
+	md := metadata.Pairs(pairs...)
+	for _, mda := range mux.metadataAnnotators {
+		md = metadata.Join(md, mda(ctx, req))
+	}
+	return ctx, md, nil
+}
+
+// ServerMetadata consists of metadata sent from gRPC server.
+type ServerMetadata struct {
+	HeaderMD  metadata.MD
+	TrailerMD metadata.MD
+}
+
+type serverMetadataKey struct{}
+
+// NewServerMetadataContext creates a new context with ServerMetadata
+func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+	return context.WithValue(ctx, serverMetadataKey{}, md)
+}
+
+// ServerMetadataFromContext returns the ServerMetadata in ctx
+func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
+	if ctx == nil {
+		return md, false
+	}
+	md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
+	return
+}
+
+// ServerTransportStream implements grpc.ServerTransportStream.
+// It should only be used by the generated files to support grpc.SendHeader
+// outside of gRPC server use.
+type ServerTransportStream struct {
+	mu      sync.Mutex
+	header  metadata.MD
+	trailer metadata.MD
+}
+
+// Method returns the method for the stream.
+func (s *ServerTransportStream) Method() string {
+	return ""
+}
+
+// Header returns the header metadata of the stream.
+func (s *ServerTransportStream) Header() metadata.MD {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.header.Copy()
+}
+
+// SetHeader sets the header metadata.
+func (s *ServerTransportStream) SetHeader(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+
+	s.mu.Lock()
+	s.header = metadata.Join(s.header, md)
+	s.mu.Unlock()
+	return nil
+}
+
+// SendHeader sets the header metadata.
+func (s *ServerTransportStream) SendHeader(md metadata.MD) error {
+	return s.SetHeader(md)
+}
+
+// Trailer returns the cached trailer metadata.
+func (s *ServerTransportStream) Trailer() metadata.MD {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.trailer.Copy()
+}
+
+// SetTrailer sets the trailer metadata.
+func (s *ServerTransportStream) SetTrailer(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+
+	s.mu.Lock()
+	s.trailer = metadata.Join(s.trailer, md)
+	s.mu.Unlock()
+	return nil
+}
+
+func timeoutDecode(s string) (time.Duration, error) {
+	size := len(s)
+	if size < 2 {
+		return 0, fmt.Errorf("timeout string is too short: %q", s)
+	}
+	d, ok := timeoutUnitToDuration(s[size-1])
+	if !ok {
+		return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
+	}
+	t, err := strconv.ParseInt(s[:size-1], 10, 64)
+	if err != nil {
+		return 0, err
+	}
+	return d * time.Duration(t), nil
+}
+
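+// For example, decoding the grpc-timeout header value "10S" yields
+// 10 * time.Second, and "250m" yields 250 * time.Millisecond:
+//
+//	d, err := timeoutDecode("10S") // d == 10*time.Second, err == nil
+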
+func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
+	switch u {
+	case 'H':
+		return time.Hour, true
+	case 'M':
+		return time.Minute, true
+	case 'S':
+		return time.Second, true
+	case 'm':
+		return time.Millisecond, true
+	case 'u':
+		return time.Microsecond, true
+	case 'n':
+		return time.Nanosecond, true
+	default:
+		return
+	}
+}
+
+// isPermanentHTTPHeader checks whether hdr belongs to the list of
+// permanent request headers maintained by IANA.
+// http://www.iana.org/assignments/message-headers/message-headers.xml
+func isPermanentHTTPHeader(hdr string) bool {
+	switch hdr {
+	case
+		"Accept",
+		"Accept-Charset",
+		"Accept-Language",
+		"Accept-Ranges",
+		"Authorization",
+		"Cache-Control",
+		"Content-Type",
+		"Cookie",
+		"Date",
+		"Expect",
+		"From",
+		"Host",
+		"If-Match",
+		"If-Modified-Since",
+		"If-None-Match",
+		"If-Schedule-Tag-Match",
+		"If-Unmodified-Since",
+		"Max-Forwards",
+		"Origin",
+		"Pragma",
+		"Referer",
+		"User-Agent",
+		"Via",
+		"Warning":
+		return true
+	}
+	return false
+}
+
+// isMalformedHTTPHeader checks whether header belongs to the list of
+// "malformed headers" and would be rejected by the gRPC server.
+func isMalformedHTTPHeader(header string) bool {
+	_, isMalformed := malformedHTTPHeaders[strings.ToLower(header)]
+	return isMalformed
+}
+
+// RPCMethod returns the method string for the server context. The returned
+// string is in the format of "/package.service/method".
+func RPCMethod(ctx context.Context) (string, bool) {
+	m := ctx.Value(rpcMethodKey{})
+	if m == nil {
+		return "", false
+	}
+	ms, ok := m.(string)
+	if !ok {
+		return "", false
+	}
+	return ms, true
+}
+
+func withRPCMethod(ctx context.Context, rpcMethodName string) context.Context {
+	return context.WithValue(ctx, rpcMethodKey{}, rpcMethodName)
+}
+
+// HTTPPathPattern returns the HTTP path pattern string relating to the HTTP handler, if one exists.
+// The format of the returned string is defined by the google.api.http path template type.
+func HTTPPathPattern(ctx context.Context) (string, bool) {
+	m := ctx.Value(httpPathPatternKey{})
+	if m == nil {
+		return "", false
+	}
+	ms, ok := m.(string)
+	if !ok {
+		return "", false
+	}
+	return ms, true
+}
+
+func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context {
+	return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern)
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
new file mode 100644
index 0000000000000000000000000000000000000000..d7b15fcfb3f853e95ba3d36cdc707bf543560b86
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
@@ -0,0 +1,318 @@
+package runtime
+
+import (
+	"encoding/base64"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"google.golang.org/protobuf/encoding/protojson"
+	"google.golang.org/protobuf/types/known/durationpb"
+	"google.golang.org/protobuf/types/known/timestamppb"
+	"google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// String just returns the given string.
+// It is just for compatibility to other types.
+func String(val string) (string, error) {
+	return val, nil
+}
+
+// StringSlice converts 'val' where individual strings are separated by
+// 'sep' into a string slice.
+func StringSlice(val, sep string) ([]string, error) {
+	return strings.Split(val, sep), nil
+}
+
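+// All of the *Slice helpers below follow the same shape: split 'val' on
+// 'sep', convert each element, and fail on the first invalid element.
+// For example (illustrative values):
+//
+//	ss, _ := StringSlice("a,b,c", ",")    // []string{"a", "b", "c"}
+//	bs, _ := BoolSlice("true,false", ",") // []bool{true, false}
+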
+// Bool converts the given string representation of a boolean value into bool.
+func Bool(val string) (bool, error) {
+	return strconv.ParseBool(val)
+}
+
+// BoolSlice converts 'val' where individual booleans are separated by
+// 'sep' into a bool slice.
+func BoolSlice(val, sep string) ([]bool, error) {
+	s := strings.Split(val, sep)
+	values := make([]bool, len(s))
+	for i, v := range s {
+		value, err := Bool(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Float64 converts the given string representation of a floating point number into float64.
+func Float64(val string) (float64, error) {
+	return strconv.ParseFloat(val, 64)
+}
+
+// Float64Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float64 slice.
+func Float64Slice(val, sep string) ([]float64, error) {
+	s := strings.Split(val, sep)
+	values := make([]float64, len(s))
+	for i, v := range s {
+		value, err := Float64(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Float32 converts the given string representation of a floating point number into float32.
+func Float32(val string) (float32, error) {
+	f, err := strconv.ParseFloat(val, 32)
+	if err != nil {
+		return 0, err
+	}
+	return float32(f), nil
+}
+
+// Float32Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float32 slice.
+func Float32Slice(val, sep string) ([]float32, error) {
+	s := strings.Split(val, sep)
+	values := make([]float32, len(s))
+	for i, v := range s {
+		value, err := Float32(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Int64 converts the given string representation of an integer into int64.
+func Int64(val string) (int64, error) {
+	return strconv.ParseInt(val, 0, 64)
+}
+
+// Int64Slice converts 'val' where individual integers are separated by
+// 'sep' into an int64 slice.
+func Int64Slice(val, sep string) ([]int64, error) {
+	s := strings.Split(val, sep)
+	values := make([]int64, len(s))
+	for i, v := range s {
+		value, err := Int64(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Int32 converts the given string representation of an integer into int32.
+func Int32(val string) (int32, error) {
+	i, err := strconv.ParseInt(val, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return int32(i), nil
+}
+
+// Int32Slice converts 'val' where individual integers are separated by
+// 'sep' into an int32 slice.
+func Int32Slice(val, sep string) ([]int32, error) {
+	s := strings.Split(val, sep)
+	values := make([]int32, len(s))
+	for i, v := range s {
+		value, err := Int32(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Uint64 converts the given string representation of an integer into uint64.
+func Uint64(val string) (uint64, error) {
+	return strconv.ParseUint(val, 0, 64)
+}
+
+// Uint64Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint64 slice.
+func Uint64Slice(val, sep string) ([]uint64, error) {
+	s := strings.Split(val, sep)
+	values := make([]uint64, len(s))
+	for i, v := range s {
+		value, err := Uint64(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Uint32 converts the given string representation of an integer into uint32.
+func Uint32(val string) (uint32, error) {
+	i, err := strconv.ParseUint(val, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return uint32(i), nil
+}
+
+// Uint32Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint32 slice.
+func Uint32Slice(val, sep string) ([]uint32, error) {
+	s := strings.Split(val, sep)
+	values := make([]uint32, len(s))
+	for i, v := range s {
+		value, err := Uint32(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Bytes converts the given base64-encoded string into a slice of bytes.
+// It accepts both the standard and the URL-safe alphabet, trying the
+// standard encoding first.
+func Bytes(val string) ([]byte, error) {
+	b, err := base64.StdEncoding.DecodeString(val)
+	if err != nil {
+		b, err = base64.URLEncoding.DecodeString(val)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return b, nil
+}
+
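+// For example, the standard padded form decodes directly, and values using
+// the URL-safe alphabet are retried with base64.URLEncoding:
+//
+//	b, _ := Bytes("aGVsbG8=") // []byte("hello")
+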
+// BytesSlice converts 'val', where individual base64-encoded byte sequences
+// are separated by 'sep', into a slice of byte slices.
+func BytesSlice(val, sep string) ([][]byte, error) {
+	s := strings.Split(val, sep)
+	values := make([][]byte, len(s))
+	for i, v := range s {
+		value, err := Bytes(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Timestamp converts the given RFC3339 formatted string into a timestamppb.Timestamp.
+func Timestamp(val string) (*timestamppb.Timestamp, error) {
+	var r timestamppb.Timestamp
+	val = strconv.Quote(strings.Trim(val, `"`))
+	unmarshaler := &protojson.UnmarshalOptions{}
+	if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
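+// Both quoted and unquoted RFC3339 inputs are accepted, since the value is
+// trimmed and re-quoted before being handed to protojson. For example:
+//
+//	ts, err := Timestamp("2024-01-02T15:04:05Z")
+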
+// Duration converts the given string into a durationpb.Duration.
+func Duration(val string) (*durationpb.Duration, error) {
+	var r durationpb.Duration
+	val = strconv.Quote(strings.Trim(val, `"`))
+	unmarshaler := &protojson.UnmarshalOptions{}
+	if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+// Enum converts the given string into an int32 that should be cast to the
+// correct enum proto type.
+func Enum(val string, enumValMap map[string]int32) (int32, error) {
+	e, ok := enumValMap[val]
+	if ok {
+		return e, nil
+	}
+
+	i, err := Int32(val)
+	if err != nil {
+		return 0, fmt.Errorf("%s is not valid", val)
+	}
+	for _, v := range enumValMap {
+		if v == i {
+			return i, nil
+		}
+	}
+	return 0, fmt.Errorf("%s is not valid", val)
+}
+
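+// For example (with a hypothetical enum map), both the symbolic name and a
+// numeric value present in the map are accepted:
+//
+//	m := map[string]int32{"UNKNOWN": 0, "ACTIVE": 1}
+//	v, _ := Enum("ACTIVE", m) // 1
+//	v, _ = Enum("1", m)       // 1
+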
+// EnumSlice converts 'val', where individual enums are separated by 'sep',
+// into an int32 slice. Each individual int32 should be cast to the
+// correct enum proto type.
+func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
+	s := strings.Split(val, sep)
+	values := make([]int32, len(s))
+	for i, v := range s {
+		value, err := Enum(v, enumValMap)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Support for google.protobuf.wrappers on top of primitive types
+
+// StringValue well-known type support as wrapper around string type
+func StringValue(val string) (*wrapperspb.StringValue, error) {
+	return wrapperspb.String(val), nil
+}
+
+// FloatValue well-known type support as wrapper around float32 type
+func FloatValue(val string) (*wrapperspb.FloatValue, error) {
+	parsedVal, err := Float32(val)
+	return wrapperspb.Float(parsedVal), err
+}
+
+// DoubleValue well-known type support as wrapper around float64 type
+func DoubleValue(val string) (*wrapperspb.DoubleValue, error) {
+	parsedVal, err := Float64(val)
+	return wrapperspb.Double(parsedVal), err
+}
+
+// BoolValue well-known type support as wrapper around bool type
+func BoolValue(val string) (*wrapperspb.BoolValue, error) {
+	parsedVal, err := Bool(val)
+	return wrapperspb.Bool(parsedVal), err
+}
+
+// Int32Value well-known type support as wrapper around int32 type
+func Int32Value(val string) (*wrapperspb.Int32Value, error) {
+	parsedVal, err := Int32(val)
+	return wrapperspb.Int32(parsedVal), err
+}
+
+// UInt32Value well-known type support as wrapper around uint32 type
+func UInt32Value(val string) (*wrapperspb.UInt32Value, error) {
+	parsedVal, err := Uint32(val)
+	return wrapperspb.UInt32(parsedVal), err
+}
+
+// Int64Value well-known type support as wrapper around int64 type
+func Int64Value(val string) (*wrapperspb.Int64Value, error) {
+	parsedVal, err := Int64(val)
+	return wrapperspb.Int64(parsedVal), err
+}
+
+// UInt64Value well-known type support as wrapper around uint64 type
+func UInt64Value(val string) (*wrapperspb.UInt64Value, error) {
+	parsedVal, err := Uint64(val)
+	return wrapperspb.UInt64(parsedVal), err
+}
+
+// BytesValue well-known type support as wrapper around bytes[] type
+func BytesValue(val string) (*wrapperspb.BytesValue, error) {
+	parsedVal, err := Bytes(val)
+	return wrapperspb.Bytes(parsedVal), err
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..b6e5ddf7a9f1389dc91ed6497659ddfeb1c73cbd
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go
@@ -0,0 +1,5 @@
+/*
+Package runtime contains runtime helper functions used by
+servers which protoc-gen-grpc-gateway generates.
+*/
+package runtime
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
new file mode 100644
index 0000000000000000000000000000000000000000..230cac7b86cf13b6732a355ad525142180b97154
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
@@ -0,0 +1,181 @@
+package runtime
+
+import (
+	"context"
+	"errors"
+	"io"
+	"net/http"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/status"
+)
+
+// ErrorHandlerFunc is the signature used to configure error handling.
+type ErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
+
+// StreamErrorHandlerFunc is the signature used to configure stream error handling.
+type StreamErrorHandlerFunc func(context.Context, error) *status.Status
+
+// RoutingErrorHandlerFunc is the signature used to configure error handling for routing errors.
+type RoutingErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, int)
+
+// HTTPStatusError is the error to use when needing to provide a different HTTP status code for an error
+// passed to the DefaultRoutingErrorHandler.
+type HTTPStatusError struct {
+	HTTPStatus int
+	Err        error
+}
+
+func (e *HTTPStatusError) Error() string {
+	return e.Err.Error()
+}
+
+// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
+// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
+func HTTPStatusFromCode(code codes.Code) int {
+	switch code {
+	case codes.OK:
+		return http.StatusOK
+	case codes.Canceled:
+		return 499
+	case codes.Unknown:
+		return http.StatusInternalServerError
+	case codes.InvalidArgument:
+		return http.StatusBadRequest
+	case codes.DeadlineExceeded:
+		return http.StatusGatewayTimeout
+	case codes.NotFound:
+		return http.StatusNotFound
+	case codes.AlreadyExists:
+		return http.StatusConflict
+	case codes.PermissionDenied:
+		return http.StatusForbidden
+	case codes.Unauthenticated:
+		return http.StatusUnauthorized
+	case codes.ResourceExhausted:
+		return http.StatusTooManyRequests
+	case codes.FailedPrecondition:
+		// Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status.
+		return http.StatusBadRequest
+	case codes.Aborted:
+		return http.StatusConflict
+	case codes.OutOfRange:
+		return http.StatusBadRequest
+	case codes.Unimplemented:
+		return http.StatusNotImplemented
+	case codes.Internal:
+		return http.StatusInternalServerError
+	case codes.Unavailable:
+		return http.StatusServiceUnavailable
+	case codes.DataLoss:
+		return http.StatusInternalServerError
+	default:
+		grpclog.Infof("Unknown gRPC error code: %v", code)
+		return http.StatusInternalServerError
+	}
+}
+
+// HTTPError uses the mux-configured error handler.
+func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
+	mux.errorHandler(ctx, mux, marshaler, w, r, err)
+}
+
+// DefaultHTTPErrorHandler is the default error handler.
+// If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode.
+// If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. This is
+// intended to allow passing through of specific statuses via the function set via WithRoutingErrorHandler
+// for the ServeMux constructor to handle edge cases which the standard mappings in HTTPStatusFromCode
+// are insufficient for.
+// If otherwise, it replies with http.StatusInternalServerError.
+//
+// The response body written by this function is a Status message marshaled by the Marshaler.
+func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
+	// return Internal when Marshal failed
+	const fallback = `{"code": 13, "message": "failed to marshal error message"}`
+
+	var customStatus *HTTPStatusError
+	if errors.As(err, &customStatus) {
+		err = customStatus.Err
+	}
+
+	s := status.Convert(err)
+	pb := s.Proto()
+
+	w.Header().Del("Trailer")
+	w.Header().Del("Transfer-Encoding")
+
+	contentType := marshaler.ContentType(pb)
+	w.Header().Set("Content-Type", contentType)
+
+	if s.Code() == codes.Unauthenticated {
+		w.Header().Set("WWW-Authenticate", s.Message())
+	}
+
+	buf, merr := marshaler.Marshal(pb)
+	if merr != nil {
+		grpclog.Infof("Failed to marshal error message %q: %v", s, merr)
+		w.WriteHeader(http.StatusInternalServerError)
+		if _, err := io.WriteString(w, fallback); err != nil {
+			grpclog.Infof("Failed to write response: %v", err)
+		}
+		return
+	}
+
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+	}
+
+	handleForwardResponseServerMetadata(w, mux, md)
+
+	// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
+	// Unless the request includes a TE header field indicating "trailers"
+	// is acceptable, as described in Section 4.3, a server SHOULD NOT
+	// generate trailer fields that it believes are necessary for the user
+	// agent to receive.
+	doForwardTrailers := requestAcceptsTrailers(r)
+
+	if doForwardTrailers {
+		handleForwardResponseTrailerHeader(w, mux, md)
+		w.Header().Set("Transfer-Encoding", "chunked")
+	}
+
+	st := HTTPStatusFromCode(s.Code())
+	if customStatus != nil {
+		st = customStatus.HTTPStatus
+	}
+
+	w.WriteHeader(st)
+	if _, err := w.Write(buf); err != nil {
+		grpclog.Infof("Failed to write response: %v", err)
+	}
+
+	if doForwardTrailers {
+		handleForwardResponseTrailer(w, mux, md)
+	}
+}
+
+// DefaultStreamErrorHandler is the default stream error handler: it converts
+// the given error into a *status.Status.
+func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status {
+	return status.Convert(err)
+}
+
+// DefaultRoutingErrorHandler is our default handler for routing errors.
+// By default, HTTP status codes are mapped to gRPC error codes as follows:
+//
+//	NotFound -> grpc.NotFound
+//	StatusBadRequest -> grpc.InvalidArgument
+//	MethodNotAllowed -> grpc.Unimplemented
+//	Other -> grpc.Internal; this method is not expected to be called for any other status
+func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) {
+	sterr := status.Error(codes.Internal, "Unexpected routing error")
+	switch httpStatus {
+	case http.StatusBadRequest:
+		sterr = status.Error(codes.InvalidArgument, http.StatusText(httpStatus))
+	case http.StatusMethodNotAllowed:
+		sterr = status.Error(codes.Unimplemented, http.StatusText(httpStatus))
+	case http.StatusNotFound:
+		sterr = status.Error(codes.NotFound, http.StatusText(httpStatus))
+	}
+	mux.errorHandler(ctx, mux, marshaler, w, r, sterr)
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
new file mode 100644
index 0000000000000000000000000000000000000000..19d9d37fff9a05aeddd9c24d7b3e9c4679bc0223
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
@@ -0,0 +1,166 @@
+package runtime
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"sort"
+
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	field_mask "google.golang.org/protobuf/types/known/fieldmaskpb"
+)
+
+func getFieldByName(fields protoreflect.FieldDescriptors, name string) protoreflect.FieldDescriptor {
+	fd := fields.ByName(protoreflect.Name(name))
+	if fd != nil {
+		return fd
+	}
+
+	return fields.ByJSONName(name)
+}
+
+// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body.
+func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.FieldMask, error) {
+	fm := &field_mask.FieldMask{}
+	var root interface{}
+
+	if err := json.NewDecoder(r).Decode(&root); err != nil {
+		if errors.Is(err, io.EOF) {
+			return fm, nil
+		}
+		return nil, err
+	}
+
+	queue := []fieldMaskPathItem{{node: root, msg: msg.ProtoReflect()}}
+	for len(queue) > 0 {
+		// dequeue an item
+		item := queue[0]
+		queue = queue[1:]
+
+		m, ok := item.node.(map[string]interface{})
+		switch {
+		case ok:
+			// if the item is an object, then enqueue all of its children
+			for k, v := range m {
+				if item.msg == nil {
+					return nil, errors.New("JSON structure did not match request type")
+				}
+
+				fd := getFieldByName(item.msg.Descriptor().Fields(), k)
+				if fd == nil {
+					return nil, fmt.Errorf("could not find field %q in %q", k, item.msg.Descriptor().FullName())
+				}
+
+				if isDynamicProtoMessage(fd.Message()) {
+					for _, p := range buildPathsBlindly(string(fd.FullName().Name()), v) {
+						newPath := p
+						if item.path != "" {
+							newPath = item.path + "." + newPath
+						}
+						queue = append(queue, fieldMaskPathItem{path: newPath})
+					}
+					continue
+				}
+
+				if isProtobufAnyMessage(fd.Message()) && !fd.IsList() {
+					_, hasTypeField := v.(map[string]interface{})["@type"]
+					if hasTypeField {
+						queue = append(queue, fieldMaskPathItem{path: k})
+						continue
+					} else {
+						return nil, fmt.Errorf("could not find field @type in %q in message %q", k, item.msg.Descriptor().FullName())
+					}
+
+				}
+
+				child := fieldMaskPathItem{
+					node: v,
+				}
+				if item.path == "" {
+					child.path = string(fd.FullName().Name())
+				} else {
+					child.path = item.path + "." + string(fd.FullName().Name())
+				}
+
+				switch {
+				case fd.IsList(), fd.IsMap():
+					// As per: https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/field_mask.proto#L85-L86
+					// Do not recurse into repeated fields. The repeated field goes on the end of the path and we stop.
+					fm.Paths = append(fm.Paths, child.path)
+				case fd.Message() != nil:
+					child.msg = item.msg.Get(fd).Message()
+					fallthrough
+				default:
+					queue = append(queue, child)
+				}
+			}
+		case len(item.path) > 0:
+			// otherwise, it's a leaf node so print its path
+			fm.Paths = append(fm.Paths, item.path)
+		}
+	}
+
+	// Sort for deterministic output in the presence
+	// of repeated fields.
+	sort.Strings(fm.Paths)
+
+	return fm, nil
+}
+
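+// For example (a sketch, assuming a request message with a singular nested
+// field "author" that has a field "name"), the body
+//
+//	{"author": {"name": "x"}}
+//
+// yields a FieldMask with Paths == ["author.name"], while repeated and map
+// fields terminate the path at the field itself.
+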
+func isProtobufAnyMessage(md protoreflect.MessageDescriptor) bool {
+	return md != nil && (md.FullName() == "google.protobuf.Any")
+}
+
+func isDynamicProtoMessage(md protoreflect.MessageDescriptor) bool {
+	return md != nil && (md.FullName() == "google.protobuf.Struct" || md.FullName() == "google.protobuf.Value")
+}
+
+// buildPathsBlindly does not attempt to match proto field names to the
+// JSON value keys. Instead it relies completely on the structure of
+// the unmarshalled JSON contained within 'in'.
+// It returns a slice containing all subpaths rooted at the passed-in
+// name and JSON value.
+func buildPathsBlindly(name string, in interface{}) []string {
+	m, ok := in.(map[string]interface{})
+	if !ok {
+		return []string{name}
+	}
+
+	var paths []string
+	queue := []fieldMaskPathItem{{path: name, node: m}}
+	for len(queue) > 0 {
+		cur := queue[0]
+		queue = queue[1:]
+
+		m, ok := cur.node.(map[string]interface{})
+		if !ok {
+			// This should never happen since we should always check that we only add
+			// nodes of type map[string]interface{} to the queue.
+			continue
+		}
+		for k, v := range m {
+			if mi, ok := v.(map[string]interface{}); ok {
+				queue = append(queue, fieldMaskPathItem{path: cur.path + "." + k, node: mi})
+			} else {
+				// This is not a struct, so there are no more levels to descend.
+				curPath := cur.path + "." + k
+				paths = append(paths, curPath)
+			}
+		}
+	}
+	return paths
+}
+
+// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask
+type fieldMaskPathItem struct {
+	// the list of prior fields leading up to node connected by dots
+	path string
+
+	// the generic decoded JSON object for the current item, inspected for further path extraction
+	node interface{}
+
+	// parent message
+	msg protoreflect.Message
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
new file mode 100644
index 0000000000000000000000000000000000000000..5e14cf8b0e299b641fc3a007e2728f3d8db5a5de
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
@@ -0,0 +1,227 @@
+package runtime
+
+import (
+	"context"
+	"errors"
+	"io"
+	"net/http"
+	"net/textproto"
+	"strings"
+
+	"google.golang.org/genproto/googleapis/api/httpbody"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/status"
+	"google.golang.org/protobuf/proto"
+)
+
+// ForwardResponseStream forwards the stream from gRPC server to REST client.
+func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
+	f, ok := w.(http.Flusher)
+	if !ok {
+		grpclog.Infof("Flush not supported in %T", w)
+		http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
+		return
+	}
+
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+		http.Error(w, "unexpected error", http.StatusInternalServerError)
+		return
+	}
+	handleForwardResponseServerMetadata(w, mux, md)
+
+	w.Header().Set("Transfer-Encoding", "chunked")
+	if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
+		HTTPError(ctx, mux, marshaler, w, req, err)
+		return
+	}
+
+	var delimiter []byte
+	if d, ok := marshaler.(Delimited); ok {
+		delimiter = d.Delimiter()
+	} else {
+		delimiter = []byte("\n")
+	}
+
+	var wroteHeader bool
+	for {
+		resp, err := recv()
+		if errors.Is(err, io.EOF) {
+			return
+		}
+		if err != nil {
+			handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
+			return
+		}
+		if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
+			handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
+			return
+		}
+
+		if !wroteHeader {
+			w.Header().Set("Content-Type", marshaler.ContentType(resp))
+		}
+
+		var buf []byte
+		httpBody, isHTTPBody := resp.(*httpbody.HttpBody)
+		switch {
+		case resp == nil:
+			buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response")))
+		case isHTTPBody:
+			buf = httpBody.GetData()
+		default:
+			result := map[string]interface{}{"result": resp}
+			if rb, ok := resp.(responseBody); ok {
+				result["result"] = rb.XXX_ResponseBody()
+			}
+
+			buf, err = marshaler.Marshal(result)
+		}
+
+		if err != nil {
+			grpclog.Infof("Failed to marshal response chunk: %v", err)
+			handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
+			return
+		}
+		if _, err := w.Write(buf); err != nil {
+			grpclog.Infof("Failed to send response chunk: %v", err)
+			return
+		}
+		wroteHeader = true
+		if _, err := w.Write(delimiter); err != nil {
+			grpclog.Infof("Failed to send delimiter chunk: %v", err)
+			return
+		}
+		f.Flush()
+	}
+}
+
+func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
+	for k, vs := range md.HeaderMD {
+		if h, ok := mux.outgoingHeaderMatcher(k); ok {
+			for _, v := range vs {
+				w.Header().Add(h, v)
+			}
+		}
+	}
+}
+
+func handleForwardResponseTrailerHeader(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
+	for k := range md.TrailerMD {
+		if h, ok := mux.outgoingTrailerMatcher(k); ok {
+			w.Header().Add("Trailer", textproto.CanonicalMIMEHeaderKey(h))
+		}
+	}
+}
+
+func handleForwardResponseTrailer(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
+	for k, vs := range md.TrailerMD {
+		if h, ok := mux.outgoingTrailerMatcher(k); ok {
+			for _, v := range vs {
+				w.Header().Add(h, v)
+			}
+		}
+	}
+}
+
+// responseBody is implemented by generated response types whose
+// `google.api.HttpRule` sets `response_body`; it exposes the field that
+// should be marshaled as the HTTP response body.
+type responseBody interface {
+	XXX_ResponseBody() interface{}
+}
+
+// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
+func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+	}
+
+	handleForwardResponseServerMetadata(w, mux, md)
+
+	// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
+	// Unless the request includes a TE header field indicating "trailers"
+	// is acceptable, as described in Section 4.3, a server SHOULD NOT
+	// generate trailer fields that it believes are necessary for the user
+	// agent to receive.
+	doForwardTrailers := requestAcceptsTrailers(req)
+
+	if doForwardTrailers {
+		handleForwardResponseTrailerHeader(w, mux, md)
+		w.Header().Set("Transfer-Encoding", "chunked")
+	}
+
+	contentType := marshaler.ContentType(resp)
+	w.Header().Set("Content-Type", contentType)
+
+	if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
+		HTTPError(ctx, mux, marshaler, w, req, err)
+		return
+	}
+	var buf []byte
+	var err error
+	if rb, ok := resp.(responseBody); ok {
+		buf, err = marshaler.Marshal(rb.XXX_ResponseBody())
+	} else {
+		buf, err = marshaler.Marshal(resp)
+	}
+	if err != nil {
+		grpclog.Infof("Marshal error: %v", err)
+		HTTPError(ctx, mux, marshaler, w, req, err)
+		return
+	}
+
+	if _, err = w.Write(buf); err != nil {
+		grpclog.Infof("Failed to write response: %v", err)
+	}
+
+	if doForwardTrailers {
+		handleForwardResponseTrailer(w, mux, md)
+	}
+}
+
+func requestAcceptsTrailers(req *http.Request) bool {
+	te := req.Header.Get("TE")
+	return strings.Contains(strings.ToLower(te), "trailers")
+}
+
+func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
+	if len(opts) == 0 {
+		return nil
+	}
+	for _, opt := range opts {
+		if err := opt(ctx, w, resp); err != nil {
+			grpclog.Infof("Error handling ForwardResponseOptions: %v", err)
+			return err
+		}
+	}
+	return nil
+}
+
+func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error, delimiter []byte) {
+	st := mux.streamErrorHandler(ctx, err)
+	msg := errorChunk(st)
+	if !wroteHeader {
+		w.Header().Set("Content-Type", marshaler.ContentType(msg))
+		w.WriteHeader(HTTPStatusFromCode(st.Code()))
+	}
+	buf, err := marshaler.Marshal(msg)
+	if err != nil {
+		grpclog.Infof("Failed to marshal an error: %v", err)
+		return
+	}
+	if _, err := w.Write(buf); err != nil {
+		grpclog.Infof("Failed to notify error to client: %v", err)
+		return
+	}
+	if _, err := w.Write(delimiter); err != nil {
+		grpclog.Infof("Failed to send delimiter chunk: %v", err)
+		return
+	}
+}
+
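+// With the default JSON marshaler, a mid-stream error therefore reaches the
+// client as a final delimited chunk shaped like (illustrative):
+//
+//	{"error":{"code":13,"message":"..."}}
+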
+func errorChunk(st *status.Status) map[string]proto.Message {
+	return map[string]proto.Message{"error": st.Proto()}
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go
new file mode 100644
index 0000000000000000000000000000000000000000..6de2e220c7fc591ff72b10a88f4f786f7b85d99f
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go
@@ -0,0 +1,32 @@
+package runtime
+
+import (
+	"google.golang.org/genproto/googleapis/api/httpbody"
+)
+
+// HTTPBodyMarshaler is a Marshaler which supports marshaling of a
+// google.api.HttpBody message as the full response body if it is
+// the actual message used as the response. If not, it falls back to the
+// Marshaler specified as its default Marshaler.
+type HTTPBodyMarshaler struct {
+	Marshaler
+}
+
+// ContentType returns its specified content type in case v is a
+// google.api.HttpBody message; otherwise it falls back to the default
+// Marshaler's content type.
+func (h *HTTPBodyMarshaler) ContentType(v interface{}) string {
+	if httpBody, ok := v.(*httpbody.HttpBody); ok {
+		return httpBody.GetContentType()
+	}
+	return h.Marshaler.ContentType(v)
+}
+
+// Marshal marshals "v" by returning the body bytes if v is a
+// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
+func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
+	if httpBody, ok := v.(*httpbody.HttpBody); ok {
+		return httpBody.GetData(), nil
+	}
+	return h.Marshaler.Marshal(v)
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go
new file mode 100644
index 0000000000000000000000000000000000000000..d6aa82578369a7d4c3d32820523f44a91285a3e4
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go
@@ -0,0 +1,45 @@
+package runtime
+
+import (
+	"encoding/json"
+	"io"
+)
+
+// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
+// with the standard "encoding/json" package of Golang.
+// Although it is generally faster for simple proto messages than JSONPb,
+// it does not support advanced features of protobuf, e.g. map, oneof, ....
+//
+// The NewEncoder and NewDecoder methods return *json.Encoder and
+// *json.Decoder respectively.
+type JSONBuiltin struct{}
+
+// ContentType always returns "application/json".
+func (*JSONBuiltin) ContentType(_ interface{}) string {
+	return "application/json"
+}
+
+// Marshal marshals "v" into JSON
+func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
+	return json.Marshal(v)
+}
+
+// Unmarshal unmarshals JSON data into "v".
+func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
+	return json.Unmarshal(data, v)
+}
+
+// NewDecoder returns a Decoder which reads JSON stream from "r".
+func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
+	return json.NewDecoder(r)
+}
+
+// NewEncoder returns an Encoder which writes JSON stream into "w".
+func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
+	return json.NewEncoder(w)
+}
+
+// Delimiter for newline encoded JSON streams.
+func (j *JSONBuiltin) Delimiter() []byte {
+	return []byte("\n")
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
new file mode 100644
index 0000000000000000000000000000000000000000..51b8247da2a0f1dd16ac55edd920519f250b1ed6
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
@@ -0,0 +1,348 @@
+package runtime
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+
+	"google.golang.org/protobuf/encoding/protojson"
+	"google.golang.org/protobuf/proto"
+)
+
+// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
+// with the "google.golang.org/protobuf/encoding/protojson" marshaler.
+// It supports the full functionality of protobuf unlike JSONBuiltin.
+//
+// The NewDecoder method returns a DecoderWrapper, so the underlying
+// *json.Decoder methods can be used.
+type JSONPb struct {
+	protojson.MarshalOptions
+	protojson.UnmarshalOptions
+}
+
+// ContentType always returns "application/json".
+func (*JSONPb) ContentType(_ interface{}) string {
+	return "application/json"
+}
+
+// Marshal marshals "v" into JSON.
+func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
+	if _, ok := v.(proto.Message); !ok {
+		return j.marshalNonProtoField(v)
+	}
+
+	var buf bytes.Buffer
+	if err := j.marshalTo(&buf, v); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
+	p, ok := v.(proto.Message)
+	if !ok {
+		buf, err := j.marshalNonProtoField(v)
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(buf)
+		return err
+	}
+	b, err := j.MarshalOptions.Marshal(p)
+	if err != nil {
+		return err
+	}
+
+	_, err = w.Write(b)
+	return err
+}
+
+var (
+	// protoMessageType is stored to prevent constant lookup of the same type at runtime.
+	protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
+)
+
+// marshalNonProtoField marshals a non-message field of a protobuf message.
+// This function does not correctly marshal arbitrary data structures into JSON,
+// it is only capable of marshaling non-message field values of protobuf,
+// i.e. primitive types, enums; pointers to primitives or enums; maps from
+// integer/string types to primitives/enums/pointers to messages.
+func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
+	if v == nil {
+		return []byte("null"), nil
+	}
+	rv := reflect.ValueOf(v)
+	for rv.Kind() == reflect.Ptr {
+		if rv.IsNil() {
+			return []byte("null"), nil
+		}
+		rv = rv.Elem()
+	}
+
+	if rv.Kind() == reflect.Slice {
+		if rv.IsNil() {
+			if j.EmitUnpopulated {
+				return []byte("[]"), nil
+			}
+			return []byte("null"), nil
+		}
+
+		if rv.Type().Elem().Implements(protoMessageType) {
+			var buf bytes.Buffer
+			if err := buf.WriteByte('['); err != nil {
+				return nil, err
+			}
+			for i := 0; i < rv.Len(); i++ {
+				if i != 0 {
+					if err := buf.WriteByte(','); err != nil {
+						return nil, err
+					}
+				}
+				if err := j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
+					return nil, err
+				}
+			}
+			if err := buf.WriteByte(']'); err != nil {
+				return nil, err
+			}
+
+			return buf.Bytes(), nil
+		}
+
+		if rv.Type().Elem().Implements(typeProtoEnum) {
+			var buf bytes.Buffer
+			if err := buf.WriteByte('['); err != nil {
+				return nil, err
+			}
+			for i := 0; i < rv.Len(); i++ {
+				if i != 0 {
+					if err := buf.WriteByte(','); err != nil {
+						return nil, err
+					}
+				}
+				var err error
+				if j.UseEnumNumbers {
+					_, err = buf.WriteString(strconv.FormatInt(rv.Index(i).Int(), 10))
+				} else {
+					_, err = buf.WriteString("\"" + rv.Index(i).Interface().(protoEnum).String() + "\"")
+				}
+				if err != nil {
+					return nil, err
+				}
+			}
+			if err := buf.WriteByte(']'); err != nil {
+				return nil, err
+			}
+
+			return buf.Bytes(), nil
+		}
+	}
+
+	if rv.Kind() == reflect.Map {
+		m := make(map[string]*json.RawMessage)
+		for _, k := range rv.MapKeys() {
+			buf, err := j.Marshal(rv.MapIndex(k).Interface())
+			if err != nil {
+				return nil, err
+			}
+			m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
+		}
+		if j.Indent != "" {
+			return json.MarshalIndent(m, "", j.Indent)
+		}
+		return json.Marshal(m)
+	}
+	if enum, ok := rv.Interface().(protoEnum); ok && !j.UseEnumNumbers {
+		return json.Marshal(enum.String())
+	}
+	return json.Marshal(rv.Interface())
+}
+
+// Unmarshal unmarshals JSON "data" into "v"
+func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
+	return unmarshalJSONPb(data, j.UnmarshalOptions, v)
+}
+
+// NewDecoder returns a Decoder which reads JSON stream from "r".
+func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
+	d := json.NewDecoder(r)
+	return DecoderWrapper{
+		Decoder:          d,
+		UnmarshalOptions: j.UnmarshalOptions,
+	}
+}
+
+// DecoderWrapper is a wrapper around a *json.Decoder that adds
+// support for protos to the Decode method.
+type DecoderWrapper struct {
+	*json.Decoder
+	protojson.UnmarshalOptions
+}
+
+// Decode wraps the embedded decoder's Decode method to support
+// protos using a jsonpb.Unmarshaler.
+func (d DecoderWrapper) Decode(v interface{}) error {
+	return decodeJSONPb(d.Decoder, d.UnmarshalOptions, v)
+}
+
+// NewEncoder returns an Encoder which writes JSON stream into "w".
+func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
+	return EncoderFunc(func(v interface{}) error {
+		if err := j.marshalTo(w, v); err != nil {
+			return err
+		}
+		// mimic json.Encoder by adding a newline (makes output
+		// easier to read when it contains multiple encoded items)
+		_, err := w.Write(j.Delimiter())
+		return err
+	})
+}
+
+func unmarshalJSONPb(data []byte, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
+	d := json.NewDecoder(bytes.NewReader(data))
+	return decodeJSONPb(d, unmarshaler, v)
+}
+
+func decodeJSONPb(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
+	p, ok := v.(proto.Message)
+	if !ok {
+		return decodeNonProtoField(d, unmarshaler, v)
+	}
+
+	// Decode into bytes for marshalling
+	var b json.RawMessage
+	if err := d.Decode(&b); err != nil {
+		return err
+	}
+
+	return unmarshaler.Unmarshal([]byte(b), p)
+}
+
+func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr {
+		return fmt.Errorf("%T is not a pointer", v)
+	}
+	for rv.Kind() == reflect.Ptr {
+		if rv.IsNil() {
+			rv.Set(reflect.New(rv.Type().Elem()))
+		}
+		if rv.Type().ConvertibleTo(typeProtoMessage) {
+			// Decode into bytes for marshalling
+			var b json.RawMessage
+			if err := d.Decode(&b); err != nil {
+				return err
+			}
+
+			return unmarshaler.Unmarshal([]byte(b), rv.Interface().(proto.Message))
+		}
+		rv = rv.Elem()
+	}
+	if rv.Kind() == reflect.Map {
+		if rv.IsNil() {
+			rv.Set(reflect.MakeMap(rv.Type()))
+		}
+		conv, ok := convFromType[rv.Type().Key().Kind()]
+		if !ok {
+			return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key())
+		}
+
+		m := make(map[string]*json.RawMessage)
+		if err := d.Decode(&m); err != nil {
+			return err
+		}
+		for k, v := range m {
+			result := conv.Call([]reflect.Value{reflect.ValueOf(k)})
+			if err := result[1].Interface(); err != nil {
+				return err.(error)
+			}
+			bk := result[0]
+			bv := reflect.New(rv.Type().Elem())
+			if v == nil {
+				null := json.RawMessage("null")
+				v = &null
+			}
+			if err := unmarshalJSONPb([]byte(*v), unmarshaler, bv.Interface()); err != nil {
+				return err
+			}
+			rv.SetMapIndex(bk, bv.Elem())
+		}
+		return nil
+	}
+	if rv.Kind() == reflect.Slice {
+		if rv.Type().Elem().Kind() == reflect.Uint8 {
+			var sl []byte
+			if err := d.Decode(&sl); err != nil {
+				return err
+			}
+			if sl != nil {
+				rv.SetBytes(sl)
+			}
+			return nil
+		}
+
+		var sl []json.RawMessage
+		if err := d.Decode(&sl); err != nil {
+			return err
+		}
+		if sl != nil {
+			rv.Set(reflect.MakeSlice(rv.Type(), 0, 0))
+		}
+		for _, item := range sl {
+			bv := reflect.New(rv.Type().Elem())
+			if err := unmarshalJSONPb([]byte(item), unmarshaler, bv.Interface()); err != nil {
+				return err
+			}
+			rv.Set(reflect.Append(rv, bv.Elem()))
+		}
+		return nil
+	}
+	if _, ok := rv.Interface().(protoEnum); ok {
+		var repr interface{}
+		if err := d.Decode(&repr); err != nil {
+			return err
+		}
+		switch v := repr.(type) {
+		case string:
+			// TODO(yugui) Should use proto.StructProperties?
+			return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
+		case float64:
+			rv.Set(reflect.ValueOf(int32(v)).Convert(rv.Type()))
+			return nil
+		default:
+			return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
+		}
+	}
+	return d.Decode(v)
+}
+
+type protoEnum interface {
+	fmt.Stringer
+	EnumDescriptor() ([]byte, []int)
+}
+
+var typeProtoEnum = reflect.TypeOf((*protoEnum)(nil)).Elem()
+
+var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
+
+// Delimiter for newline encoded JSON streams.
+func (j *JSONPb) Delimiter() []byte {
+	return []byte("\n")
+}
+
+var (
+	convFromType = map[reflect.Kind]reflect.Value{
+		reflect.String:  reflect.ValueOf(String),
+		reflect.Bool:    reflect.ValueOf(Bool),
+		reflect.Float64: reflect.ValueOf(Float64),
+		reflect.Float32: reflect.ValueOf(Float32),
+		reflect.Int64:   reflect.ValueOf(Int64),
+		reflect.Int32:   reflect.ValueOf(Int32),
+		reflect.Uint64:  reflect.ValueOf(Uint64),
+		reflect.Uint32:  reflect.ValueOf(Uint32),
+		reflect.Slice:   reflect.ValueOf(Bytes),
+	}
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
new file mode 100644
index 0000000000000000000000000000000000000000..398c780dc226b75f1a0ef9856cfc0eab68d53f8e
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
@@ -0,0 +1,60 @@
+package runtime
+
+import (
+	"errors"
+	"io"
+
+	"google.golang.org/protobuf/proto"
+)
+
+// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialized proto bytes.
+type ProtoMarshaller struct{}
+
+// ContentType always returns "application/octet-stream".
+func (*ProtoMarshaller) ContentType(_ interface{}) string {
+	return "application/octet-stream"
+}
+
+// Marshal marshals "value" into Proto
+func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
+	message, ok := value.(proto.Message)
+	if !ok {
+		return nil, errors.New("unable to marshal non proto field")
+	}
+	return proto.Marshal(message)
+}
+
+// Unmarshal unmarshals proto "data" into "value"
+func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
+	message, ok := value.(proto.Message)
+	if !ok {
+		return errors.New("unable to unmarshal non proto field")
+	}
+	return proto.Unmarshal(data, message)
+}
+
+// NewDecoder returns a Decoder which reads proto stream from "reader".
+func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
+	return DecoderFunc(func(value interface{}) error {
+		buffer, err := io.ReadAll(reader)
+		if err != nil {
+			return err
+		}
+		return marshaller.Unmarshal(buffer, value)
+	})
+}
+
+// NewEncoder returns an Encoder which writes proto stream into "writer".
+func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
+	return EncoderFunc(func(value interface{}) error {
+		buffer, err := marshaller.Marshal(value)
+		if err != nil {
+			return err
+		}
+		if _, err := writer.Write(buffer); err != nil {
+			return err
+		}
+
+		return nil
+	})
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go
new file mode 100644
index 0000000000000000000000000000000000000000..2c0d25ff4935ad5d22b11084cd30c0d5bd4e2c55
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go
@@ -0,0 +1,50 @@
+package runtime
+
+import (
+	"io"
+)
+
+// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
+type Marshaler interface {
+	// Marshal marshals "v" into byte sequence.
+	Marshal(v interface{}) ([]byte, error)
+	// Unmarshal unmarshals "data" into "v".
+	// "v" must be a pointer value.
+	Unmarshal(data []byte, v interface{}) error
+	// NewDecoder returns a Decoder which reads byte sequence from "r".
+	NewDecoder(r io.Reader) Decoder
+	// NewEncoder returns an Encoder which writes byte sequences into "w".
+	NewEncoder(w io.Writer) Encoder
+	// ContentType returns the Content-Type which this marshaler is responsible for.
+	// The parameter describes the type which is being marshalled, which can sometimes
+	// affect the content type returned.
+	ContentType(v interface{}) string
+}
+
+// Decoder decodes a byte sequence
+type Decoder interface {
+	Decode(v interface{}) error
+}
+
+// Encoder encodes gRPC payloads / fields into byte sequence.
+type Encoder interface {
+	Encode(v interface{}) error
+}
+
+// DecoderFunc adapts a decoder function into a Decoder.
+type DecoderFunc func(v interface{}) error
+
+// Decode delegates invocations to the underlying function itself.
+func (f DecoderFunc) Decode(v interface{}) error { return f(v) }
+
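+// A minimal sketch: adapt a closure into a Decoder (assuming r is an
+// io.Reader in scope):
+//
+//	var d Decoder = DecoderFunc(func(v interface{}) error {
+//		return json.NewDecoder(r).Decode(v)
+//	})
+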
+// EncoderFunc adapts an encoder function into an Encoder.
+type EncoderFunc func(v interface{}) error
+
+// Encode delegates invocations to the underlying function itself.
+func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
+
+// Delimited defines the streaming delimiter.
+type Delimited interface {
+	// Delimiter returns the record separator for the stream.
+	Delimiter() []byte
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
new file mode 100644
index 0000000000000000000000000000000000000000..a714de024068232c29446a43f73db861c86bce60
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
@@ -0,0 +1,109 @@
+package runtime
+
+import (
+	"errors"
+	"mime"
+	"net/http"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/protobuf/encoding/protojson"
+)
+
+// MIMEWildcard is the fallback MIME type used for requests which do not match
+// a registered MIME type.
+const MIMEWildcard = "*"
+
+var (
+	acceptHeader      = http.CanonicalHeaderKey("Accept")
+	contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
+
+	defaultMarshaler = &HTTPBodyMarshaler{
+		Marshaler: &JSONPb{
+			MarshalOptions: protojson.MarshalOptions{
+				EmitUnpopulated: true,
+			},
+			UnmarshalOptions: protojson.UnmarshalOptions{
+				DiscardUnknown: true,
+			},
+		},
+	}
+)
+
+// MarshalerForRequest returns the inbound/outbound marshalers for this request.
+// It checks the registry on the ServeMux for the MIME type set by the Content-Type header.
+// If it isn't set (or the request Content-Type is empty), it checks for "*".
+// If there are multiple Content-Type headers set, it chooses the first one
+// that it can exactly match in the registry.
+// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler.
+func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
+	for _, acceptVal := range r.Header[acceptHeader] {
+		if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
+			outbound = m
+			break
+		}
+	}
+
+	for _, contentTypeVal := range r.Header[contentTypeHeader] {
+		contentType, _, err := mime.ParseMediaType(contentTypeVal)
+		if err != nil {
+			grpclog.Infof("Failed to parse Content-Type %s: %v", contentTypeVal, err)
+			continue
+		}
+		if m, ok := mux.marshalers.mimeMap[contentType]; ok {
+			inbound = m
+			break
+		}
+	}
+
+	if inbound == nil {
+		inbound = mux.marshalers.mimeMap[MIMEWildcard]
+	}
+	if outbound == nil {
+		outbound = inbound
+	}
+
+	return inbound, outbound
+}
+
+// marshalerRegistry is a mapping from MIME types to Marshalers.
+type marshalerRegistry struct {
+	mimeMap map[string]Marshaler
+}
+
+// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
+// MIME type).
+func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
+	if len(mime) == 0 {
+		return errors.New("empty MIME type")
+	}
+
+	m.mimeMap[mime] = marshaler
+
+	return nil
+}
+
+// makeMarshalerMIMERegistry returns a new registry of marshalers.
+// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
+//
+// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
+// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
+// with a "application/json" Content-Type.
+// "*" can be used to match any Content-Type.
+// This can be attached to a ServerMux with the marshaler option.
+func makeMarshalerMIMERegistry() marshalerRegistry {
+	return marshalerRegistry{
+		mimeMap: map[string]Marshaler{
+			MIMEWildcard: defaultMarshaler,
+		},
+	}
+}
+
+// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
+// Marshalers to a MIME type in mux.
+func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
+	return func(mux *ServeMux) {
+		if err := mux.marshalers.add(mime, marshaler); err != nil {
+			panic(err)
+		}
+	}
+}
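+
+// Usage sketch (the "application/json+pretty" MIME type is an arbitrary
+// example):
+//
+//	mux := runtime.NewServeMux(runtime.WithMarshalerOption(
+//		"application/json+pretty",
+//		&runtime.JSONPb{MarshalOptions: protojson.MarshalOptions{Indent: "  "}},
+//	))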
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
new file mode 100644
index 0000000000000000000000000000000000000000..628e1fde1cb7b0de8333aab5896c2efa627fec29
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
@@ -0,0 +1,486 @@
+package runtime
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/textproto"
+	"regexp"
+	"strings"
+
+	"github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/health/grpc_health_v1"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+	"google.golang.org/protobuf/proto"
+)
+
+// UnescapingMode defines the behavior of ServeMux when unescaping path parameters.
+type UnescapingMode int
+
+const (
+	// UnescapingModeLegacy is the default V2 behavior, which escapes the entire
+	// path string before doing any routing.
+	UnescapingModeLegacy UnescapingMode = iota
+
+	// UnescapingModeAllExceptReserved unescapes all path parameters except RFC 6570
+	// reserved characters.
+	UnescapingModeAllExceptReserved
+
+	// UnescapingModeAllExceptSlash unescapes URL path parameters except path
+	// separators, which will be left as "%2F".
+	UnescapingModeAllExceptSlash
+
+	// UnescapingModeAllCharacters unescapes all URL path parameters.
+	UnescapingModeAllCharacters
+
+	// UnescapingModeDefault is the default escaping type.
+	// TODO(v3): default this to UnescapingModeAllExceptReserved per grpc-httpjson-transcoding's
+	// reference implementation
+	UnescapingModeDefault = UnescapingModeLegacy
+)
+
+var encodedPathSplitter = regexp.MustCompile("(/|%2F)")
+
+// A HandlerFunc handles a specific pair of path pattern and HTTP method.
+type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
+
+// ServeMux is a request multiplexer for grpc-gateway.
+// It matches http requests to patterns and invokes the corresponding handler.
+type ServeMux struct {
+	// handlers maps HTTP method to a list of handlers.
+	handlers                  map[string][]handler
+	forwardResponseOptions    []func(context.Context, http.ResponseWriter, proto.Message) error
+	marshalers                marshalerRegistry
+	incomingHeaderMatcher     HeaderMatcherFunc
+	outgoingHeaderMatcher     HeaderMatcherFunc
+	outgoingTrailerMatcher    HeaderMatcherFunc
+	metadataAnnotators        []func(context.Context, *http.Request) metadata.MD
+	errorHandler              ErrorHandlerFunc
+	streamErrorHandler        StreamErrorHandlerFunc
+	routingErrorHandler       RoutingErrorHandlerFunc
+	disablePathLengthFallback bool
+	unescapingMode            UnescapingMode
+}
+
+// ServeMuxOption is an option that can be given to a ServeMux on construction.
+type ServeMuxOption func(*ServeMux)
+
+// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
+//
+// forwardResponseOption is an option that will be called on the relevant context.Context,
+// http.ResponseWriter, and proto.Message before every forwarded response.
+//
+// The message may be nil in the case where just a header is being sent.
+func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
+	}
+}
+
+// WithUnescapingMode sets the escaping type. See the definitions of UnescapingMode
+// for more information.
+func WithUnescapingMode(mode UnescapingMode) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.unescapingMode = mode
+	}
+}
+
+// SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters.
+// Configuring this will mean the generated OpenAPI output is no longer correct, and it should be
+// done with careful consideration.
+func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		currentQueryParser = queryParameterParser
+	}
+}
+
+// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
+type HeaderMatcherFunc func(string) (string, bool)
+
+// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header
+// keys (as specified by the IANA, e.g. Accept, Cookie, Host) to the gRPC metadata with the grpcgateway- prefix.
+// If you want to know which headers are considered permanent, you can view the isPermanentHTTPHeader function.
+// HTTP headers that start with 'Grpc-Metadata-' are mapped to gRPC metadata after removing the prefix 'Grpc-Metadata-'.
+// Other headers are not added to the gRPC metadata.
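+//
+// For example, "Grpc-Metadata-Foo" is mapped to the metadata key "Foo", while the
+// permanent header "Cookie" is mapped to "grpcgateway-Cookie".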
+func DefaultHeaderMatcher(key string) (string, bool) {
+	switch key = textproto.CanonicalMIMEHeaderKey(key); {
+	case isPermanentHTTPHeader(key):
+		return MetadataPrefix + key, true
+	case strings.HasPrefix(key, MetadataHeaderPrefix):
+		return key[len(MetadataHeaderPrefix):], true
+	}
+	return "", false
+}
+
+func defaultOutgoingHeaderMatcher(key string) (string, bool) {
+	return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
+}
+
+func defaultOutgoingTrailerMatcher(key string) (string, bool) {
+	return fmt.Sprintf("%s%s", MetadataTrailerPrefix, key), true
+}
+
+// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming requests to the gateway.
+//
+// The matcher will be called for each header in the http.Request. If the matcher returns true, that header will be
+// passed to the gRPC context. To transform a header before passing it to the gRPC context, the matcher should return
+// the modified header.
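+//
+// A minimal sketch that forwards one extra header and otherwise keeps the
+// default behavior (the header name "X-Request-Id" is illustrative):
+//
+//	matcher := func(key string) (string, bool) {
+//		if key == "X-Request-Id" {
+//			return key, true
+//		}
+//		return runtime.DefaultHeaderMatcher(key)
+//	}
+//	mux := runtime.NewServeMux(runtime.WithIncomingHeaderMatcher(matcher))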
+func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+	for _, header := range fn.matchedMalformedHeaders() {
+		grpclog.Warningf("The configured forwarding filter would allow %q to be sent to the gRPC server, which will likely cause errors. See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more information.", header)
+	}
+
+	return func(mux *ServeMux) {
+		mux.incomingHeaderMatcher = fn
+	}
+}
+
+// matchedMalformedHeaders returns the malformed headers that would be forwarded to the gRPC server.
+func (fn HeaderMatcherFunc) matchedMalformedHeaders() []string {
+	if fn == nil {
+		return nil
+	}
+	headers := make([]string, 0)
+	for header := range malformedHTTPHeaders {
+		out, accept := fn(header)
+		if accept && isMalformedHTTPHeader(out) {
+			headers = append(headers, out)
+		}
+	}
+	return headers
+}
+
+// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing responses from the gateway.
+//
+// The matcher will be called for each header in the response header metadata. If the matcher returns true, that header
+// will be passed to the http response returned from the gateway. To transform a header before passing it to the
+// response, the matcher should return the modified header.
+func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+	return func(mux *ServeMux) {
+		mux.outgoingHeaderMatcher = fn
+	}
+}
+
+// WithOutgoingTrailerMatcher returns a ServeMuxOption representing a headerMatcher for outgoing responses from the gateway.
+//
+// The matcher will be called for each header in the response trailer metadata. If the matcher returns true, that header
+// will be passed to the http response returned from the gateway. To transform a header before passing it to the
+// response, the matcher should return the modified header.
+func WithOutgoingTrailerMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+	return func(mux *ServeMux) {
+		mux.outgoingTrailerMatcher = fn
+	}
+}
+
+// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
+//
+// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
+// is reading token from cookie and adding it in gRPC context.
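+//
+// A hedged sketch of the cookie use case (the cookie and metadata key names are illustrative):
+//
+//	mux := runtime.NewServeMux(runtime.WithMetadata(
+//		func(ctx context.Context, r *http.Request) metadata.MD {
+//			if c, err := r.Cookie("session-token"); err == nil {
+//				return metadata.Pairs("session-token", c.Value)
+//			}
+//			return nil
+//		}))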
+func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
+	}
+}
+
+// WithErrorHandler returns a ServeMuxOption for configuring a custom error handler.
+//
+// This can be used to configure a custom error response.
+func WithErrorHandler(fn ErrorHandlerFunc) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.errorHandler = fn
+	}
+}
+
+// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
+// error handler, which allows for customizing the error trailer for server-streaming
+// calls.
+//
+// For stream errors that occur before any response has been written, the mux's
+// ErrorHandler will be invoked. However, once data has been written, the errors must
+// be handled differently: they must be included in the response body. The response body's
+// final message will include the error details returned by the stream error handler.
+func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.streamErrorHandler = fn
+	}
+}
+
+// WithRoutingErrorHandler returns a ServeMuxOption for configuring a custom error handler to handle http routing errors.
+//
+// The handler is called for errors that can happen before a gRPC route is selected or executed, with one of the
+// following status codes: StatusMethodNotAllowed, StatusNotFound, StatusBadRequest.
+func WithRoutingErrorHandler(fn RoutingErrorHandlerFunc) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.routingErrorHandler = fn
+	}
+}
+
+// WithDisablePathLengthFallback returns a ServeMuxOption for disabling the path length fallback.
+func WithDisablePathLengthFallback() ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.disablePathLengthFallback = true
+	}
+}
+
+// WithHealthEndpointAt returns a ServeMuxOption that will add an endpoint to the created ServeMux at the path specified by endpointPath.
+// When called the handler will forward the request to the upstream grpc service health check (defined in the
+// gRPC Health Checking Protocol).
+//
+// See https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/health_check/ for more information on how
+// to set up the protocol in the grpc server.
+//
+// If you define a service as a query parameter, it will also be forwarded as the service in the HealthCheckRequest.
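+//
+// A minimal usage sketch (assuming conn is an existing *grpc.ClientConn):
+//
+//	mux := runtime.NewServeMux(
+//		runtime.WithHealthEndpointAt(grpc_health_v1.NewHealthClient(conn), "/hc"),
+//	)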
+func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpointPath string) ServeMuxOption {
+	return func(s *ServeMux) {
+		// error can be ignored since pattern is definitely valid
+		_ = s.HandlePath(
+			http.MethodGet, endpointPath, func(w http.ResponseWriter, r *http.Request, _ map[string]string,
+			) {
+				_, outboundMarshaler := MarshalerForRequest(s, r)
+
+				resp, err := healthCheckClient.Check(r.Context(), &grpc_health_v1.HealthCheckRequest{
+					Service: r.URL.Query().Get("service"),
+				})
+				if err != nil {
+					s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
+					return
+				}
+
+				w.Header().Set("Content-Type", "application/json")
+
+				if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING {
+					switch resp.GetStatus() {
+					case grpc_health_v1.HealthCheckResponse_NOT_SERVING, grpc_health_v1.HealthCheckResponse_UNKNOWN:
+						err = status.Error(codes.Unavailable, resp.String())
+					case grpc_health_v1.HealthCheckResponse_SERVICE_UNKNOWN:
+						err = status.Error(codes.NotFound, resp.String())
+					}
+
+					s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
+					return
+				}
+
+				_ = outboundMarshaler.NewEncoder(w).Encode(resp)
+			})
+	}
+}
+
+// WithHealthzEndpoint returns a ServeMuxOption that will add a /healthz endpoint to the created ServeMux.
+//
+// See WithHealthEndpointAt for the general implementation.
+func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMuxOption {
+	return WithHealthEndpointAt(healthCheckClient, "/healthz")
+}
+
+// NewServeMux returns a new ServeMux whose internal mapping is empty.
+func NewServeMux(opts ...ServeMuxOption) *ServeMux {
+	serveMux := &ServeMux{
+		handlers:               make(map[string][]handler),
+		forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
+		marshalers:             makeMarshalerMIMERegistry(),
+		errorHandler:           DefaultHTTPErrorHandler,
+		streamErrorHandler:     DefaultStreamErrorHandler,
+		routingErrorHandler:    DefaultRoutingErrorHandler,
+		unescapingMode:         UnescapingModeDefault,
+	}
+
+	for _, opt := range opts {
+		opt(serveMux)
+	}
+
+	if serveMux.incomingHeaderMatcher == nil {
+		serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
+	}
+	if serveMux.outgoingHeaderMatcher == nil {
+		serveMux.outgoingHeaderMatcher = defaultOutgoingHeaderMatcher
+	}
+	if serveMux.outgoingTrailerMatcher == nil {
+		serveMux.outgoingTrailerMatcher = defaultOutgoingTrailerMatcher
+	}
+
+	return serveMux
+}
+
+// Handle associates "h" with the pair of HTTP method and path pattern.
+func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
+	s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...)
+}
+
+// HandlePath allows users to configure custom path handlers.
+// refer: https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/inject_router/
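+//
+// A minimal usage sketch (the path and response body are illustrative):
+//
+//	err := mux.HandlePath(http.MethodGet, "/v1/ping", func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
+//		_, _ = w.Write([]byte("pong"))
+//	})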
+func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) error {
+	compiler, err := httprule.Parse(pathPattern)
+	if err != nil {
+		return fmt.Errorf("parsing path pattern: %w", err)
+	}
+	tp := compiler.Compile()
+	pattern, err := NewPattern(tp.Version, tp.OpCodes, tp.Pool, tp.Verb)
+	if err != nil {
+		return fmt.Errorf("creating new pattern: %w", err)
+	}
+	s.Handle(meth, pattern, h)
+	return nil
+}
+
+// ServeHTTP dispatches the request to the first handler whose pattern matches r.Method and r.URL.Path.
+func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+
+	path := r.URL.Path
+	if !strings.HasPrefix(path, "/") {
+		_, outboundMarshaler := MarshalerForRequest(s, r)
+		s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusBadRequest)
+		return
+	}
+
+	// TODO(v3): remove UnescapingModeLegacy
+	if s.unescapingMode != UnescapingModeLegacy && r.URL.RawPath != "" {
+		path = r.URL.RawPath
+	}
+
+	if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
+		r.Method = strings.ToUpper(override)
+		if err := r.ParseForm(); err != nil {
+			_, outboundMarshaler := MarshalerForRequest(s, r)
+			sterr := status.Error(codes.InvalidArgument, err.Error())
+			s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+			return
+		}
+	}
+
+	var pathComponents []string
+	// In UnescapingModeLegacy the URL has already been fully unescaped, so splitting on "%2F" here as well
+	// would amount to double unescaping. In UnescapingModeAllCharacters we do split on "%2F", because the
+	// path in use is the RawPath, which is still escaped. Note that this means the default behavior of this
+	// function will change when UnescapingModeDefault is changed from UnescapingModeLegacy to
+	// UnescapingModeAllExceptReserved.
+	if s.unescapingMode == UnescapingModeAllCharacters {
+		pathComponents = encodedPathSplitter.Split(path[1:], -1)
+	} else {
+		pathComponents = strings.Split(path[1:], "/")
+	}
+
+	lastPathComponent := pathComponents[len(pathComponents)-1]
+
+	for _, h := range s.handlers[r.Method] {
+		// If the pattern has a verb, explicitly look for a suffix in the last
+		// component that matches a colon plus the verb. This allows us to
+		// handle some cases that otherwise can't be correctly handled by the
+		// former LastIndex case, such as when the verb literal itself contains
+		// a colon. This should work for all cases that have run through the
+		// parser because we know what verb we're looking for, however, there
+		// are still some cases that the parser itself cannot disambiguate. See
+		// the comment there if interested.
+
+		var verb string
+		patVerb := h.pat.Verb()
+
+		idx := -1
+		if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) {
+			idx = len(lastPathComponent) - len(patVerb) - 1
+		}
+		if idx == 0 {
+			_, outboundMarshaler := MarshalerForRequest(s, r)
+			s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
+			return
+		}
+
+		comps := make([]string, len(pathComponents))
+		copy(comps, pathComponents)
+
+		if idx > 0 {
+			comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:]
+		}
+
+		pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode)
+		if err != nil {
+			var mse MalformedSequenceError
+			if ok := errors.As(err, &mse); ok {
+				_, outboundMarshaler := MarshalerForRequest(s, r)
+				s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
+					HTTPStatus: http.StatusBadRequest,
+					Err:        mse,
+				})
+			}
+			continue
+		}
+		h.h(w, r, pathParams)
+		return
+	}
+
+	// If no handler was found for the request, look through the other methods
+	// to handle a POST -> GET fallback, if the request is subject to path
+	// length fallback.
+	// Note we are not eagerly checking the request here, as we want to return the
+	// right HTTP status code, and we need to process the fallback candidates in
+	// order to do that.
+	for m, handlers := range s.handlers {
+		if m == r.Method {
+			continue
+		}
+		for _, h := range handlers {
+			var verb string
+			patVerb := h.pat.Verb()
+
+			idx := -1
+			if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) {
+				idx = len(lastPathComponent) - len(patVerb) - 1
+			}
+
+			comps := make([]string, len(pathComponents))
+			copy(comps, pathComponents)
+
+			if idx > 0 {
+				comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:]
+			}
+
+			pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode)
+			if err != nil {
+				var mse MalformedSequenceError
+				if ok := errors.As(err, &mse); ok {
+					_, outboundMarshaler := MarshalerForRequest(s, r)
+					s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
+						HTTPStatus: http.StatusBadRequest,
+						Err:        mse,
+					})
+				}
+				continue
+			}
+
+			// X-HTTP-Method-Override is optional. Always allow fallback to POST.
+			// Also, only consider POST -> GET fallbacks, and avoid falling back to
+			// potentially dangerous operations like DELETE.
+			if s.isPathLengthFallback(r) && m == http.MethodGet {
+				if err := r.ParseForm(); err != nil {
+					_, outboundMarshaler := MarshalerForRequest(s, r)
+					sterr := status.Error(codes.InvalidArgument, err.Error())
+					s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+					return
+				}
+				h.h(w, r, pathParams)
+				return
+			}
+			_, outboundMarshaler := MarshalerForRequest(s, r)
+			s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusMethodNotAllowed)
+			return
+		}
+	}
+
+	_, outboundMarshaler := MarshalerForRequest(s, r)
+	s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
+}
+
+// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
+func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
+	return s.forwardResponseOptions
+}
+
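+// isPathLengthFallback reports whether path length fallback is enabled and the
+// request is a form-encoded POST, making it eligible for the method-override and
+// POST -> GET fallback handling above.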
+func (s *ServeMux) isPathLengthFallback(r *http.Request) bool {
+	return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
+}
+
+type handler struct {
+	pat Pattern
+	h   HandlerFunc
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
new file mode 100644
index 0000000000000000000000000000000000000000..8f90d15a5620f2ecea4dc5240600fa29fcb026aa
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
@@ -0,0 +1,381 @@
+package runtime
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+	"google.golang.org/grpc/grpclog"
+)
+
+var (
+	// ErrNotMatch indicates that the given HTTP request path does not match to the pattern.
+	ErrNotMatch = errors.New("not match to the path pattern")
+	// ErrInvalidPattern indicates that the given definition of Pattern is not valid.
+	ErrInvalidPattern = errors.New("invalid pattern")
+)
+
+type MalformedSequenceError string
+
+func (e MalformedSequenceError) Error() string {
+	return "malformed path escape " + strconv.Quote(string(e))
+}
+
+type op struct {
+	code    utilities.OpCode
+	operand int
+}
+
+// Pattern is a template pattern of http request paths defined in
+// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto
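+//
+// For example, an illustrative pattern "/v1/{name}" compiles roughly to opcodes
+// that push the literal "v1", push the next path component, and capture it into
+// the variable "name".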
+type Pattern struct {
+	// ops is a list of operations
+	ops []op
+	// pool is a constant pool indexed by the operands or vars.
+	pool []string
+	// vars is a list of variables names to be bound by this pattern
+	vars []string
+	// stacksize is the max depth of the stack
+	stacksize int
+	// tailLen is the length of the fixed-size segments after a deep wildcard
+	tailLen int
+	// verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part.
+	verb string
+}
+
+// NewPattern returns a new Pattern from the given definition values.
+// "ops" is a sequence of op codes. "pool" is a constant pool.
+// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
+// "version" must be 1 for now.
+// It returns an error if the given definition is invalid.
+func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) {
+	if version != 1 {
+		grpclog.Infof("unsupported version: %d", version)
+		return Pattern{}, ErrInvalidPattern
+	}
+
+	l := len(ops)
+	if l%2 != 0 {
+		grpclog.Infof("odd number of ops codes: %d", l)
+		return Pattern{}, ErrInvalidPattern
+	}
+
+	var (
+		typedOps        []op
+		stack, maxstack int
+		tailLen         int
+		pushMSeen       bool
+		vars            []string
+	)
+	for i := 0; i < l; i += 2 {
+		op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]}
+		switch op.code {
+		case utilities.OpNop:
+			continue
+		case utilities.OpPush:
+			if pushMSeen {
+				tailLen++
+			}
+			stack++
+		case utilities.OpPushM:
+			if pushMSeen {
+				grpclog.Infof("pushM appears twice")
+				return Pattern{}, ErrInvalidPattern
+			}
+			pushMSeen = true
+			stack++
+		case utilities.OpLitPush:
+			if op.operand < 0 || len(pool) <= op.operand {
+				grpclog.Infof("negative literal index: %d", op.operand)
+				return Pattern{}, ErrInvalidPattern
+			}
+			if pushMSeen {
+				tailLen++
+			}
+			stack++
+		case utilities.OpConcatN:
+			if op.operand <= 0 {
+				grpclog.Infof("negative concat size: %d", op.operand)
+				return Pattern{}, ErrInvalidPattern
+			}
+			stack -= op.operand
+			if stack < 0 {
+				grpclog.Info("stack underflow")
+				return Pattern{}, ErrInvalidPattern
+			}
+			stack++
+		case utilities.OpCapture:
+			if op.operand < 0 || len(pool) <= op.operand {
+				grpclog.Infof("variable name index out of bound: %d", op.operand)
+				return Pattern{}, ErrInvalidPattern
+			}
+			v := pool[op.operand]
+			op.operand = len(vars)
+			vars = append(vars, v)
+			stack--
+			if stack < 0 {
+				grpclog.Infof("stack underflow")
+				return Pattern{}, ErrInvalidPattern
+			}
+		default:
+			grpclog.Infof("invalid opcode: %d", op.code)
+			return Pattern{}, ErrInvalidPattern
+		}
+
+		if maxstack < stack {
+			maxstack = stack
+		}
+		typedOps = append(typedOps, op)
+	}
+	return Pattern{
+		ops:       typedOps,
+		pool:      pool,
+		vars:      vars,
+		stacksize: maxstack,
+		tailLen:   tailLen,
+		verb:      verb,
+	}, nil
+}
+
+// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization.
+func MustPattern(p Pattern, err error) Pattern {
+	if err != nil {
+		grpclog.Fatalf("Pattern initialization failed: %v", err)
+	}
+	return p
+}
+
+// MatchAndEscape examines components to determine if they match to a Pattern.
+// MatchAndEscape will return an error if no Patterns matched or if a pattern
+// matched but contained malformed escape sequences. If successful, the function
+// returns a mapping from field paths to their captured values.
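+//
+// For example, for an illustrative pattern "/v1/{name}", the components
+// []string{"v1", "books"} with an empty verb yield {"name": "books"}.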
+func (p Pattern) MatchAndEscape(components []string, verb string, unescapingMode UnescapingMode) (map[string]string, error) {
+	if p.verb != verb {
+		if p.verb != "" {
+			return nil, ErrNotMatch
+		}
+		if len(components) == 0 {
+			components = []string{":" + verb}
+		} else {
+			components = append([]string{}, components...)
+			components[len(components)-1] += ":" + verb
+		}
+	}
+
+	var pos int
+	stack := make([]string, 0, p.stacksize)
+	captured := make([]string, len(p.vars))
+	l := len(components)
+	for _, op := range p.ops {
+		var err error
+
+		switch op.code {
+		case utilities.OpNop:
+			continue
+		case utilities.OpPush, utilities.OpLitPush:
+			if pos >= l {
+				return nil, ErrNotMatch
+			}
+			c := components[pos]
+			if op.code == utilities.OpLitPush {
+				if lit := p.pool[op.operand]; c != lit {
+					return nil, ErrNotMatch
+				}
+			} else if op.code == utilities.OpPush {
+				if c, err = unescape(c, unescapingMode, false); err != nil {
+					return nil, err
+				}
+			}
+			stack = append(stack, c)
+			pos++
+		case utilities.OpPushM:
+			end := len(components)
+			if end < pos+p.tailLen {
+				return nil, ErrNotMatch
+			}
+			end -= p.tailLen
+			c := strings.Join(components[pos:end], "/")
+			if c, err = unescape(c, unescapingMode, true); err != nil {
+				return nil, err
+			}
+			stack = append(stack, c)
+			pos = end
+		case utilities.OpConcatN:
+			n := op.operand
+			l := len(stack) - n
+			stack = append(stack[:l], strings.Join(stack[l:], "/"))
+		case utilities.OpCapture:
+			n := len(stack) - 1
+			captured[op.operand] = stack[n]
+			stack = stack[:n]
+		}
+	}
+	if pos < l {
+		return nil, ErrNotMatch
+	}
+	bindings := make(map[string]string)
+	for i, val := range captured {
+		bindings[p.vars[i]] = val
+	}
+	return bindings, nil
+}
+
+// Match examines components to determine if they match to a Pattern.
+// It will never perform per-component unescaping (see: UnescapingModeLegacy).
+// Match will return an error if no Patterns matched. If successful,
+// the function returns a mapping from field paths to their captured values.
+//
+// Deprecated: Use MatchAndEscape.
+func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
+	return p.MatchAndEscape(components, verb, UnescapingModeDefault)
+}
+
+// Verb returns the verb part of the Pattern.
+func (p Pattern) Verb() string { return p.verb }
+
+func (p Pattern) String() string {
+	var stack []string
+	for _, op := range p.ops {
+		switch op.code {
+		case utilities.OpNop:
+			continue
+		case utilities.OpPush:
+			stack = append(stack, "*")
+		case utilities.OpLitPush:
+			stack = append(stack, p.pool[op.operand])
+		case utilities.OpPushM:
+			stack = append(stack, "**")
+		case utilities.OpConcatN:
+			n := op.operand
+			l := len(stack) - n
+			stack = append(stack[:l], strings.Join(stack[l:], "/"))
+		case utilities.OpCapture:
+			n := len(stack) - 1
+			stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n])
+		}
+	}
+	segs := strings.Join(stack, "/")
+	if p.verb != "" {
+		return fmt.Sprintf("/%s:%s", segs, p.verb)
+	}
+	return "/" + segs
+}
+
+/*
+ * The following code is adopted and modified from Go's standard library
+ * and carries the attached license.
+ *
+ *     Copyright 2009 The Go Authors. All rights reserved.
+ *     Use of this source code is governed by a BSD-style
+ *     license that can be found in the LICENSE file.
+ */
+
+// ishex returns whether or not the given byte is a valid hex character
+func ishex(c byte) bool {
+	switch {
+	case '0' <= c && c <= '9':
+		return true
+	case 'a' <= c && c <= 'f':
+		return true
+	case 'A' <= c && c <= 'F':
+		return true
+	}
+	return false
+}
+
+func isRFC6570Reserved(c byte) bool {
+	switch c {
+	case '!', '#', '$', '&', '\'', '(', ')', '*',
+		'+', ',', '/', ':', ';', '=', '?', '@', '[', ']':
+		return true
+	default:
+		return false
+	}
+}
+
+// unhex converts a hex digit character to its numeric value
+func unhex(c byte) byte {
+	switch {
+	case '0' <= c && c <= '9':
+		return c - '0'
+	case 'a' <= c && c <= 'f':
+		return c - 'a' + 10
+	case 'A' <= c && c <= 'F':
+		return c - 'A' + 10
+	}
+	return 0
+}
+
+// shouldUnescapeWithMode returns true if the character is escapable with the
+// given mode
+func shouldUnescapeWithMode(c byte, mode UnescapingMode) bool {
+	switch mode {
+	case UnescapingModeAllExceptReserved:
+		if isRFC6570Reserved(c) {
+			return false
+		}
+	case UnescapingModeAllExceptSlash:
+		if c == '/' {
+			return false
+		}
+	case UnescapingModeAllCharacters:
+		return true
+	}
+	return true
+}
+
+// unescape unescapes a path string using the provided mode
+func unescape(s string, mode UnescapingMode, multisegment bool) (string, error) {
+	// TODO(v3): remove UnescapingModeLegacy
+	if mode == UnescapingModeLegacy {
+		return s, nil
+	}
+
+	if !multisegment {
+		mode = UnescapingModeAllCharacters
+	}
+
+	// Count %, check that they're well-formed.
+	n := 0
+	for i := 0; i < len(s); {
+		if s[i] == '%' {
+			n++
+			if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
+				s = s[i:]
+				if len(s) > 3 {
+					s = s[:3]
+				}
+
+				return "", MalformedSequenceError(s)
+			}
+			i += 3
+		} else {
+			i++
+		}
+	}
+
+	if n == 0 {
+		return s, nil
+	}
+
+	var t strings.Builder
+	t.Grow(len(s))
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '%':
+			c := unhex(s[i+1])<<4 | unhex(s[i+2])
+			if shouldUnescapeWithMode(c, mode) {
+				t.WriteByte(c)
+				i += 2
+				continue
+			}
+			fallthrough
+		default:
+			t.WriteByte(s[i])
+		}
+	}
+
+	return t.String(), nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go
new file mode 100644
index 0000000000000000000000000000000000000000..d549407f20fbf2526b7e8a78f8b18646cb27c4c7
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go
@@ -0,0 +1,80 @@
+package runtime
+
+import (
+	"google.golang.org/protobuf/proto"
+)
+
+// StringP returns a pointer to a string whose pointee is the same as the given string value.
+func StringP(val string) (*string, error) {
+	return proto.String(val), nil
+}
+
+// BoolP parses the given string representation of a boolean value,
+// and returns a pointer to a bool whose value is the same as the parsed value.
+func BoolP(val string) (*bool, error) {
+	b, err := Bool(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Bool(b), nil
+}
+
+// Float64P parses the given string representation of a floating point number,
+// and returns a pointer to a float64 whose value is the same as the parsed number.
+func Float64P(val string) (*float64, error) {
+	f, err := Float64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Float64(f), nil
+}
+
+// Float32P parses the given string representation of a floating point number,
+// and returns a pointer to a float32 whose value is the same as the parsed number.
+func Float32P(val string) (*float32, error) {
+	f, err := Float32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Float32(f), nil
+}
+
+// Int64P parses the given string representation of an integer
+// and returns a pointer to an int64 whose value is the same as the parsed integer.
+func Int64P(val string) (*int64, error) {
+	i, err := Int64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Int64(i), nil
+}
+
+// Int32P parses the given string representation of an integer
+// and returns a pointer to an int32 whose value is the same as the parsed integer.
+func Int32P(val string) (*int32, error) {
+	i, err := Int32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Int32(i), nil
+}
+
+// Uint64P parses the given string representation of an integer
+// and returns a pointer to a uint64 whose value is the same as the parsed integer.
+func Uint64P(val string) (*uint64, error) {
+	i, err := Uint64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Uint64(i), nil
+}
+
+// Uint32P parses the given string representation of an integer
+// and returns a pointer to a uint32 whose value is the same as the parsed integer.
+func Uint32P(val string) (*uint32, error) {
+	i, err := Uint32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Uint32(i), nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
new file mode 100644
index 0000000000000000000000000000000000000000..d01933c4fd2032ba451a42d4b8a9f67254c86397
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
@@ -0,0 +1,338 @@
+package runtime
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/protobuf/encoding/protojson"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/types/known/durationpb"
+	field_mask "google.golang.org/protobuf/types/known/fieldmaskpb"
+	"google.golang.org/protobuf/types/known/structpb"
+	"google.golang.org/protobuf/types/known/timestamppb"
+	"google.golang.org/protobuf/types/known/wrapperspb"
+)
+
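+// valuesKeyRegexp matches query keys of the form "name[key]", which are used below
+// to populate map fields; e.g. "labels[env]=prod" sets the key "env" of the map
+// field "labels" (illustrative names).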
+var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`)
+
+var currentQueryParser QueryParameterParser = &DefaultQueryParser{}
+
+// QueryParameterParser defines interface for all query parameter parsers
+type QueryParameterParser interface {
+	Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error
+}
+
+// PopulateQueryParameters parses query parameters
+// into "msg" using the current query parser
+func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
+	return currentQueryParser.Parse(msg, values, filter)
+}
+
+// DefaultQueryParser is a QueryParameterParser which implements the default
+// query parameters parsing behavior.
+//
+// See https://github.com/grpc-ecosystem/grpc-gateway/issues/2632 for more context.
+type DefaultQueryParser struct{}
+
+// Parse populates "values" into "msg".
+// A value is ignored if its key starts with one of the elements in "filter".
+func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
+	for key, values := range values {
+		if match := valuesKeyRegexp.FindStringSubmatch(key); len(match) == 3 {
+			key = match[1]
+			values = append([]string{match[2]}, values...)
+		}
+		fieldPath := strings.Split(key, ".")
+		if filter.HasCommonPrefix(fieldPath) {
+			continue
+		}
+		if err := populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, values); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// PopulateFieldFromPath sets a value in a nested Protobuf structure.
+func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
+	fieldPath := strings.Split(fieldPathString, ".")
+	return populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, []string{value})
+}
+
+func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []string, values []string) error {
+	if len(fieldPath) < 1 {
+		return errors.New("no field path")
+	}
+	if len(values) < 1 {
+		return errors.New("no value provided")
+	}
+
+	var fieldDescriptor protoreflect.FieldDescriptor
+	for i, fieldName := range fieldPath {
+		fields := msgValue.Descriptor().Fields()
+
+		// Get field by name
+		fieldDescriptor = fields.ByName(protoreflect.Name(fieldName))
+		if fieldDescriptor == nil {
+			fieldDescriptor = fields.ByJSONName(fieldName)
+			if fieldDescriptor == nil {
+				// We're not returning an error here because this could just be
+				// an extra query parameter that isn't part of the request.
+				grpclog.Infof("field not found in %q: %q", msgValue.Descriptor().FullName(), strings.Join(fieldPath, "."))
+				return nil
+			}
+		}
+
+		// If this is the last element, we're done
+		if i == len(fieldPath)-1 {
+			break
+		}
+
+		// Only singular message fields are allowed
+		if fieldDescriptor.Message() == nil || fieldDescriptor.Cardinality() == protoreflect.Repeated {
+			return fmt.Errorf("invalid path: %q is not a message", fieldName)
+		}
+
+		// Get the nested message
+		msgValue = msgValue.Mutable(fieldDescriptor).Message()
+	}
+
+	// Check if oneof already set
+	if of := fieldDescriptor.ContainingOneof(); of != nil {
+		if f := msgValue.WhichOneof(of); f != nil {
+			return fmt.Errorf("field already set for oneof %q", of.FullName().Name())
+		}
+	}
+
+	switch {
+	case fieldDescriptor.IsList():
+		return populateRepeatedField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).List(), values)
+	case fieldDescriptor.IsMap():
+		return populateMapField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).Map(), values)
+	}
+
+	if len(values) > 1 {
+		return fmt.Errorf("too many values for field %q: %s", fieldDescriptor.FullName().Name(), strings.Join(values, ", "))
+	}
+
+	return populateField(fieldDescriptor, msgValue, values[0])
+}
+
+func populateField(fieldDescriptor protoreflect.FieldDescriptor, msgValue protoreflect.Message, value string) error {
+	v, err := parseField(fieldDescriptor, value)
+	if err != nil {
+		return fmt.Errorf("parsing field %q: %w", fieldDescriptor.FullName().Name(), err)
+	}
+
+	msgValue.Set(fieldDescriptor, v)
+	return nil
+}
+
+func populateRepeatedField(fieldDescriptor protoreflect.FieldDescriptor, list protoreflect.List, values []string) error {
+	for _, value := range values {
+		v, err := parseField(fieldDescriptor, value)
+		if err != nil {
+			return fmt.Errorf("parsing list %q: %w", fieldDescriptor.FullName().Name(), err)
+		}
+		list.Append(v)
+	}
+
+	return nil
+}
+
+func populateMapField(fieldDescriptor protoreflect.FieldDescriptor, mp protoreflect.Map, values []string) error {
+	if len(values) != 2 {
+		return fmt.Errorf("more than one value provided for key %q in map %q", values[0], fieldDescriptor.FullName())
+	}
+
+	key, err := parseField(fieldDescriptor.MapKey(), values[0])
+	if err != nil {
+		return fmt.Errorf("parsing map key %q: %w", fieldDescriptor.FullName().Name(), err)
+	}
+
+	value, err := parseField(fieldDescriptor.MapValue(), values[1])
+	if err != nil {
+		return fmt.Errorf("parsing map value %q: %w", fieldDescriptor.FullName().Name(), err)
+	}
+
+	mp.Set(key.MapKey(), value)
+
+	return nil
+}
+
+func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (protoreflect.Value, error) {
+	switch fieldDescriptor.Kind() {
+	case protoreflect.BoolKind:
+		v, err := strconv.ParseBool(value)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		return protoreflect.ValueOfBool(v), nil
+	case protoreflect.EnumKind:
+		enum, err := protoregistry.GlobalTypes.FindEnumByName(fieldDescriptor.Enum().FullName())
+		if err != nil {
+			if errors.Is(err, protoregistry.NotFound) {
+				return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName())
+			}
+			return protoreflect.Value{}, fmt.Errorf("failed to look up enum: %w", err)
+		}
+		// Look for enum by name
+		v := enum.Descriptor().Values().ByName(protoreflect.Name(value))
+		if v == nil {
+			i, err := strconv.Atoi(value)
+			if err != nil {
+				return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
+			}
+			// Look for enum by number
+			if v = enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i)); v == nil {
+				return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
+			}
+		}
+		return protoreflect.ValueOfEnum(v.Number()), nil
+	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+		v, err := strconv.ParseInt(value, 10, 32)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		return protoreflect.ValueOfInt32(int32(v)), nil
+	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+		v, err := strconv.ParseInt(value, 10, 64)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		return protoreflect.ValueOfInt64(v), nil
+	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+		v, err := strconv.ParseUint(value, 10, 32)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		return protoreflect.ValueOfUint32(uint32(v)), nil
+	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+		v, err := strconv.ParseUint(value, 10, 64)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		return protoreflect.ValueOfUint64(v), nil
+	case protoreflect.FloatKind:
+		v, err := strconv.ParseFloat(value, 32)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		return protoreflect.ValueOfFloat32(float32(v)), nil
+	case protoreflect.DoubleKind:
+		v, err := strconv.ParseFloat(value, 64)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		return protoreflect.ValueOfFloat64(v), nil
+	case protoreflect.StringKind:
+		return protoreflect.ValueOfString(value), nil
+	case protoreflect.BytesKind:
+		v, err := Bytes(value)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		return protoreflect.ValueOfBytes(v), nil
+	case protoreflect.MessageKind, protoreflect.GroupKind:
+		return parseMessage(fieldDescriptor.Message(), value)
+	default:
+		panic(fmt.Sprintf("unknown field kind: %v", fieldDescriptor.Kind()))
+	}
+}
+
+func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (protoreflect.Value, error) {
+	var msg proto.Message
+	switch msgDescriptor.FullName() {
+	case "google.protobuf.Timestamp":
+		t, err := time.Parse(time.RFC3339Nano, value)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		msg = timestamppb.New(t)
+	case "google.protobuf.Duration":
+		d, err := time.ParseDuration(value)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		msg = durationpb.New(d)
+	case "google.protobuf.DoubleValue":
+		v, err := strconv.ParseFloat(value, 64)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		msg = wrapperspb.Double(v)
+	case "google.protobuf.FloatValue":
+		v, err := strconv.ParseFloat(value, 32)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		msg = wrapperspb.Float(float32(v))
+	case "google.protobuf.Int64Value":
+		v, err := strconv.ParseInt(value, 10, 64)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		msg = wrapperspb.Int64(v)
+	case "google.protobuf.Int32Value":
+		v, err := strconv.ParseInt(value, 10, 32)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		msg = wrapperspb.Int32(int32(v))
+	case "google.protobuf.UInt64Value":
+		v, err := strconv.ParseUint(value, 10, 64)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		msg = wrapperspb.UInt64(v)
+	case "google.protobuf.UInt32Value":
+		v, err := strconv.ParseUint(value, 10, 32)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		msg = wrapperspb.UInt32(uint32(v))
+	case "google.protobuf.BoolValue":
+		v, err := strconv.ParseBool(value)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		msg = wrapperspb.Bool(v)
+	case "google.protobuf.StringValue":
+		msg = wrapperspb.String(value)
+	case "google.protobuf.BytesValue":
+		v, err := Bytes(value)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		msg = wrapperspb.Bytes(v)
+	case "google.protobuf.FieldMask":
+		fm := &field_mask.FieldMask{}
+		fm.Paths = append(fm.Paths, strings.Split(value, ",")...)
+		msg = fm
+	case "google.protobuf.Value":
+		var v structpb.Value
+		if err := protojson.Unmarshal([]byte(value), &v); err != nil {
+			return protoreflect.Value{}, err
+		}
+		msg = &v
+	case "google.protobuf.Struct":
+		var v structpb.Struct
+		if err := protojson.Unmarshal([]byte(value), &v); err != nil {
+			return protoreflect.Value{}, err
+		}
+		msg = &v
+	default:
+		return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName()))
+	}
+
+	return protoreflect.ValueOfMessage(msg.ProtoReflect()), nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
new file mode 100644
index 0000000000000000000000000000000000000000..b89409465773e637b441fc4d13941de9a017d4e5
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
@@ -0,0 +1,31 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:public"])
+
+go_library(
+    name = "utilities",
+    srcs = [
+        "doc.go",
+        "pattern.go",
+        "readerfactory.go",
+        "string_array_flag.go",
+        "trie.go",
+    ],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities",
+)
+
+go_test(
+    name = "utilities_test",
+    size = "small",
+    srcs = [
+        "string_array_flag_test.go",
+        "trie_test.go",
+    ],
+    deps = [":utilities"],
+)
+
+alias(
+    name = "go_default_library",
+    actual = ":utilities",
+    visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..cf79a4d58860746e77fedcf7e85b8178153b7a05
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go
@@ -0,0 +1,2 @@
+// Package utilities provides members for internal use in grpc-gateway.
+package utilities
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go
new file mode 100644
index 0000000000000000000000000000000000000000..dfe7de4864ab1b5d2136b82c21d133e7e8798039
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go
@@ -0,0 +1,22 @@
+package utilities
+
+// An OpCode is an opcode of compiled path patterns.
+type OpCode int
+
+// These constants are the valid values of OpCode.
+const (
+	// OpNop does nothing
+	OpNop = OpCode(iota)
+	// OpPush pushes a component to stack
+	OpPush
+	// OpLitPush pushes a component to stack if it matches to the literal
+	OpLitPush
+	// OpPushM concatenates the remaining components and pushes it to stack
+	OpPushM
+	// OpConcatN pops N items from stack, concatenates them and pushes it back to stack
+	OpConcatN
+	// OpCapture pops an item and binds it to the variable
+	OpCapture
+	// OpEnd is the least positive invalid opcode.
+	OpEnd
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
new file mode 100644
index 0000000000000000000000000000000000000000..01d26edae3c97801bf01e121e00d8f00189f55a0
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
@@ -0,0 +1,19 @@
+package utilities
+
+import (
+	"bytes"
+	"io"
+)
+
+// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins
+// at the start of the stream.
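+//
+// A minimal sketch:
+//
+//	newReader, _ := utilities.IOReaderFactory(strings.NewReader("payload"))
+//	r1 := newReader() // reads from the start
+//	r2 := newReader() // an independent reader, also from the start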
+func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
+	b, err := io.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+
+	return func() io.Reader {
+		return bytes.NewReader(b)
+	}, nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go
new file mode 100644
index 0000000000000000000000000000000000000000..d224ab776c0cb4fd7906d6dfd9c4aa8dbeecf2e0
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go
@@ -0,0 +1,33 @@
+package utilities
+
+import (
+	"flag"
+	"strings"
+)
+
+// flagInterface is a cut-down interface to `flag`
+type flagInterface interface {
+	Var(value flag.Value, name string, usage string)
+}
+
+// StringArrayFlag defines a flag with the specified name and usage string.
+// The return value is the address of a `StringArrayFlags` variable that stores the repeated values of the flag.
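+//
+// A minimal sketch:
+//
+//	fs := flag.NewFlagSet("example", flag.ContinueOnError)
+//	headers := utilities.StringArrayFlag(fs, "header", "may be repeated")
+//	_ = fs.Parse([]string{"-header", "a", "-header", "b"})
+//	// *headers now holds {"a", "b"}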
+func StringArrayFlag(f flagInterface, name string, usage string) *StringArrayFlags {
+	value := &StringArrayFlags{}
+	f.Var(value, name, usage)
+	return value
+}
+
+// StringArrayFlags is a wrapper of `[]string` to provide an interface for `flag.Var`
+type StringArrayFlags []string
+
+// String returns a string representation of `StringArrayFlags`
+func (i *StringArrayFlags) String() string {
+	return strings.Join(*i, ",")
+}
+
+// Set appends a value to `StringArrayFlags`
+func (i *StringArrayFlags) Set(value string) error {
+	*i = append(*i, value)
+	return nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
new file mode 100644
index 0000000000000000000000000000000000000000..dd99b0ed25622f80200b2206483999ba29d6e801
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
@@ -0,0 +1,174 @@
+package utilities
+
+import (
+	"sort"
+)
+
+// DoubleArray is a double-array implementation of a trie on sequences of strings.
+type DoubleArray struct {
+	// Encoding keeps an encoding from string to int
+	Encoding map[string]int
+	// Base is the base array of Double Array
+	Base []int
+	// Check is the check array of Double Array
+	Check []int
+}
+
+// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
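+//
+// A small illustrative sketch:
+//
+//	da := utilities.NewDoubleArray([][]string{{"a", "b"}, {"x"}})
+//	da.HasCommonPrefix([]string{"a", "b", "c"}) // true: {"a", "b"} is a prefix
+//	da.HasCommonPrefix([]string{"a"})           // false: no registered sequence is a prefix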
+func NewDoubleArray(seqs [][]string) *DoubleArray {
+	da := &DoubleArray{Encoding: make(map[string]int)}
+	if len(seqs) == 0 {
+		return da
+	}
+
+	encoded := registerTokens(da, seqs)
+	sort.Sort(byLex(encoded))
+
+	root := node{row: -1, col: -1, left: 0, right: len(encoded)}
+	addSeqs(da, encoded, 0, root)
+
+	for i := len(da.Base); i > 0; i-- {
+		if da.Check[i-1] != 0 {
+			da.Base = da.Base[:i]
+			da.Check = da.Check[:i]
+			break
+		}
+	}
+	return da
+}
+
+func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
+	var result [][]int
+	for _, seq := range seqs {
+		encoded := make([]int, 0, len(seq))
+		for _, token := range seq {
+			if _, ok := da.Encoding[token]; !ok {
+				da.Encoding[token] = len(da.Encoding)
+			}
+			encoded = append(encoded, da.Encoding[token])
+		}
+		result = append(result, encoded)
+	}
+	for i := range result {
+		result[i] = append(result[i], len(da.Encoding))
+	}
+	return result
+}
+
+type node struct {
+	row, col    int
+	left, right int
+}
+
+func (n node) value(seqs [][]int) int {
+	return seqs[n.row][n.col]
+}
+
+func (n node) children(seqs [][]int) []*node {
+	var result []*node
+	lastVal := int(-1)
+	last := new(node)
+	for i := n.left; i < n.right; i++ {
+		if lastVal == seqs[i][n.col+1] {
+			continue
+		}
+		// Record the token code we just branched on so that subsequent rows with
+		// the same token are folded into the same child (seqs is sorted lexically).
+		lastVal = seqs[i][n.col+1]
+		last.right = i
+		last = &node{
+			row:  i,
+			col:  n.col + 1,
+			left: i,
+		}
+		result = append(result, last)
+	}
+	last.right = n.right
+	return result
+}
+
+func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) {
+	ensureSize(da, pos)
+
+	children := n.children(seqs)
+	var i int
+	for i = 1; ; i++ {
+		ok := func() bool {
+			for _, child := range children {
+				code := child.value(seqs)
+				j := i + code
+				ensureSize(da, j)
+				if da.Check[j] != 0 {
+					return false
+				}
+			}
+			return true
+		}()
+		if ok {
+			break
+		}
+	}
+	da.Base[pos] = i
+	for _, child := range children {
+		code := child.value(seqs)
+		j := i + code
+		da.Check[j] = pos + 1
+	}
+	terminator := len(da.Encoding)
+	for _, child := range children {
+		code := child.value(seqs)
+		if code == terminator {
+			continue
+		}
+		j := i + code
+		addSeqs(da, seqs, j, *child)
+	}
+}
+
+func ensureSize(da *DoubleArray, i int) {
+	for i >= len(da.Base) {
+		da.Base = append(da.Base, make([]int, len(da.Base)+1)...)
+		da.Check = append(da.Check, make([]int, len(da.Check)+1)...)
+	}
+}
+
+type byLex [][]int
+
+func (l byLex) Len() int      { return len(l) }
+func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l byLex) Less(i, j int) bool {
+	si := l[i]
+	sj := l[j]
+	var k int
+	for k = 0; k < len(si) && k < len(sj); k++ {
+		if si[k] < sj[k] {
+			return true
+		}
+		if si[k] > sj[k] {
+			return false
+		}
+	}
+	return k < len(sj)
+}
+
+// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.
+func (da *DoubleArray) HasCommonPrefix(seq []string) bool {
+	if len(da.Base) == 0 {
+		return false
+	}
+
+	var i int
+	for _, t := range seq {
+		code, ok := da.Encoding[t]
+		if !ok {
+			break
+		}
+		j := da.Base[i] + code
+		if len(da.Check) <= j || da.Check[j] != i+1 {
+			break
+		}
+		i = j
+	}
+	j := da.Base[i] + len(da.Encoding)
+	if len(da.Check) <= j || da.Check[j] != i+1 {
+		return false
+	}
+	return true
+}
diff --git a/vendor/github.com/ip2location/ip2location-go/v9/.readthedocs.yaml b/vendor/github.com/ip2location/ip2location-go/v9/.readthedocs.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..77285093decb2eed650323e0c73c860b0473ffd9
--- /dev/null
+++ b/vendor/github.com/ip2location/ip2location-go/v9/.readthedocs.yaml
@@ -0,0 +1,32 @@
+# .readthedocs.yaml
+# Read the Docs configuration file
+# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
+
+# Required
+version: 2
+
+# Set the OS, Python version and other tools you might need
+build:
+  os: ubuntu-22.04
+  tools:
+    python: "3.11"
+    # You can also specify other tool versions:
+    # nodejs: "19"
+    # rust: "1.64"
+    # golang: "1.19"
+
+# Build documentation in the "docs/" directory with Sphinx
+sphinx:
+   configuration: docs/source/conf.py
+
+# Optionally build your docs in additional formats such as PDF and ePub
+# formats:
+#    - pdf
+#    - epub
+
+# Optional but recommended, declare the Python requirements required
+# to build your documentation
+# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
+python:
+   install:
+   - requirements: docs/requirements.txt
\ No newline at end of file
diff --git a/vendor/github.com/ip2location/ip2location-go/v9/LICENSE.TXT b/vendor/github.com/ip2location/ip2location-go/v9/LICENSE.TXT
index 3f56aae6744030e93395ac2c911b16ad5b086db4..c96a21a2a54d84a3aa740c36d7404be116cb2337 100644
--- a/vendor/github.com/ip2location/ip2location-go/v9/LICENSE.TXT
+++ b/vendor/github.com/ip2location/ip2location-go/v9/LICENSE.TXT
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2023 IP2Location.com
+Copyright (c) 2023 - 2024 IP2Location.com
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/github.com/ip2location/ip2location-go/v9/README.md b/vendor/github.com/ip2location/ip2location-go/v9/README.md
index 85a17306449be264eb91373818a50f00db9591c7..93876e20d1a7cf8e21b30311f900c8bda00110db 100644
--- a/vendor/github.com/ip2location/ip2location-go/v9/README.md
+++ b/vendor/github.com/ip2location/ip2location-go/v9/README.md
@@ -20,503 +20,9 @@ The database will be updated in monthly basis for the greater accuracy. Free LIT
 The paid databases are available at https://www.ip2location.com under Premium subscription package.
 
 As an alternative, this package can also call the IP2Location Web Service. This requires an API key. If you don't have an existing API key, you can subscribe for one at the below:
-
 https://www.ip2location.com/web-service/ip2location
 
-## Installation
-
-```
-go get github.com/ip2location/ip2location-go/v9
-```
-
-## QUERY USING THE BIN FILE
-
-## Dependencies
-
-This package requires IP2Location BIN data file to function. You may download the BIN data file at
-* IP2Location LITE BIN Data (Free): https://lite.ip2location.com
-* IP2Location Commercial BIN Data (Comprehensive): https://www.ip2location.com
-
-
-## IPv4 BIN vs IPv6 BIN
-
-Use the IPv4 BIN file if you just need to query IPv4 addresses.
-
-Use the IPv6 BIN file if you need to query BOTH IPv4 and IPv6 addresses.
-
-
-## Methods
-
-Below are the methods supported in this package.
-
-|Method Name|Description|
-|---|---|
-|OpenDB|Initialize the package with the BIN file.|
-|Get_all|Returns the geolocation information in an object.|
-|Get_country_short|Returns the country code.|
-|Get_country_long|Returns the country name.|
-|Get_region|Returns the region name.|
-|Get_city|Returns the city name.|
-|Get_isp|Returns the ISP name.|
-|Get_latitude|Returns the latitude.|
-|Get_longitude|Returns the longitude.|
-|Get_domain|Returns the domain name.|
-|Get_zipcode|Returns the ZIP code.|
-|Get_timezone|Returns the time zone.|
-|Get_netspeed|Returns the net speed.|
-|Get_iddcode|Returns the IDD code.|
-|Get_areacode|Returns the area code.|
-|Get_weatherstationcode|Returns the weather station code.|
-|Get_weatherstationname|Returns the weather station name.|
-|Get_mcc|Returns the mobile country code.|
-|Get_mnc|Returns the mobile network code.|
-|Get_mobilebrand|Returns the mobile brand.|
-|Get_elevation|Returns the elevation in meters.|
-|Get_usagetype|Returns the usage type.|
-|Get_addresstype|Returns the address type.|
-|Get_category|Returns the IAB category.|
-|Get_district|Returns the district name.|
-|Get_asn|Returns the autonomous system number (ASN).|
-|Get_as|Returns the autonomous system (AS).|
-|Close|Closes BIN file.|
-
-## Usage
-
-```go
-package main
-
-import (
-	"fmt"
-	"github.com/ip2location/ip2location-go/v9"
-)
-
-func main() {
-	db, err := ip2location.OpenDB("./IP-COUNTRY-REGION-CITY-LATITUDE-LONGITUDE-ZIPCODE-TIMEZONE-ISP-DOMAIN-NETSPEED-AREACODE-WEATHER-MOBILE-ELEVATION-USAGETYPE-ADDRESSTYPE-CATEGORY-DISTRICT-ASN.BIN")
-	
-	if err != nil {
-		fmt.Print(err)
-		return
-	}
-	ip := "8.8.8.8"
-	results, err := db.Get_all(ip)
-	
-	if err != nil {
-		fmt.Print(err)
-		return
-	}
-	
-	fmt.Printf("country_short: %s\n", results.Country_short)
-	fmt.Printf("country_long: %s\n", results.Country_long)
-	fmt.Printf("region: %s\n", results.Region)
-	fmt.Printf("city: %s\n", results.City)
-	fmt.Printf("isp: %s\n", results.Isp)
-	fmt.Printf("latitude: %f\n", results.Latitude)
-	fmt.Printf("longitude: %f\n", results.Longitude)
-	fmt.Printf("domain: %s\n", results.Domain)
-	fmt.Printf("zipcode: %s\n", results.Zipcode)
-	fmt.Printf("timezone: %s\n", results.Timezone)
-	fmt.Printf("netspeed: %s\n", results.Netspeed)
-	fmt.Printf("iddcode: %s\n", results.Iddcode)
-	fmt.Printf("areacode: %s\n", results.Areacode)
-	fmt.Printf("weatherstationcode: %s\n", results.Weatherstationcode)
-	fmt.Printf("weatherstationname: %s\n", results.Weatherstationname)
-	fmt.Printf("mcc: %s\n", results.Mcc)
-	fmt.Printf("mnc: %s\n", results.Mnc)
-	fmt.Printf("mobilebrand: %s\n", results.Mobilebrand)
-	fmt.Printf("elevation: %f\n", results.Elevation)
-	fmt.Printf("usagetype: %s\n", results.Usagetype)
-	fmt.Printf("addresstype: %s\n", results.Addresstype)
-	fmt.Printf("category: %s\n", results.Category)
-	fmt.Printf("district: %s\n", results.District)
-	fmt.Printf("asn: %s\n", results.Asn)
-	fmt.Printf("as: %s\n", results.As)
-	fmt.Printf("api version: %s\n", ip2location.Api_version())
-	
-	db.Close()
-}
-```
-
-## QUERY USING THE IP2LOCATION WEB SERVICE
-
-## Methods
-Below are the methods supported in this package.
-
-|Method Name|Description|
-|---|---|
-|OpenWS| 3 input parameters:<ol><li>IP2Location API Key.</li><li>Package (WS1 - WS25)</li><li>Use HTTPS or HTTP</li></ol> |
-|LookUp|Query IP address. This method returns an object containing the geolocation info. <ul><li>country_code</li><li>country_name</li><li>region_name</li><li>city_name</li><li>latitude</li><li>longitude</li><li>zip_code</li><li>time_zone</li><li>isp</li><li>domain</li><li>net_speed</li><li>idd_code</li><li>area_code</li><li>weather_station_code</li><li>weather_station_name</li><li>mcc</li><li>mnc</li><li>mobile_brand</li><li>elevation</li><li>usage_type</li><li>address_type</li><li>category</li><li>continent<ul><li>name</li><li>code</li><li>hemisphere</li><li>translations</li></ul></li><li>country<ul><li>name</li><li>alpha3_code</li><li>numeric_code</li><li>demonym</li><li>flag</li><li>capital</li><li>total_area</li><li>population</li><li>currency<ul><li>code</li><li>name</li><li>symbol</li></ul></li><li>language<ul><li>code</li><li>name</li></ul></li><li>idd_code</li><li>tld</li><li>is_eu</li><li>translations</li></ul></li><li>region<ul><li>name</li><li>code</li><li>translations</li></ul></li><li>city<ul><li>name</li><li>translations</li></ul></li><li>geotargeting<ul><li>metro</li></ul></li><li>country_groupings</li><li>time_zone_info<ul><li>olson</li><li>current_time</li><li>gmt_offset</li><li>is_dst</li><li>sunrise</li><li>sunset</li></ul></li></ul>|
-|GetCredit|This method returns the web service credit balance in an object.|
-
-## Usage
-
-```go
-package main
-
-import (
-	"github.com/ip2location/ip2location-go/v9"
-	"fmt"
-)
-
-func main() {
-	apikey := "YOUR_API_KEY"
-	apipackage := "WS25"
-	usessl := true
-	addon := "continent,country,region,city,geotargeting,country_groupings,time_zone_info" // leave blank if not needed
-	lang := "en" // leave blank if not needed
-	
-	ws, err := ip2location.OpenWS(apikey, apipackage, usessl)
-	
-	if err != nil {
-		fmt.Print(err)
-		return
-	}
-	ip := "8.8.8.8"
-	res, err := ws.LookUp(ip, addon, lang)
-
-	if err != nil {
-		fmt.Print(err)
-		return
-	}
-
-	if res.Response != "OK" {
-		fmt.Printf("Error: %s\n", res.Response)
-	} else {
-		// standard results
-		fmt.Printf("Response: %s\n", res.Response)
-		fmt.Printf("CountryCode: %s\n", res.CountryCode)
-		fmt.Printf("CountryName: %s\n", res.CountryName)
-		fmt.Printf("RegionName: %s\n", res.RegionName)
-		fmt.Printf("CityName: %s\n", res.CityName)
-		fmt.Printf("Latitude: %f\n", res.Latitude)
-		fmt.Printf("Longitude: %f\n", res.Longitude)
-		fmt.Printf("ZipCode: %s\n", res.ZipCode)
-		fmt.Printf("TimeZone: %s\n", res.TimeZone)
-		fmt.Printf("Isp: %s\n", res.Isp)
-		fmt.Printf("Domain: %s\n", res.Domain)
-		fmt.Printf("NetSpeed: %s\n", res.NetSpeed)
-		fmt.Printf("IddCode: %s\n", res.IddCode)
-		fmt.Printf("AreaCode: %s\n", res.AreaCode)
-		fmt.Printf("WeatherStationCode: %s\n", res.WeatherStationCode)
-		fmt.Printf("WeatherStationName: %s\n", res.WeatherStationName)
-		fmt.Printf("Mcc: %s\n", res.Mcc)
-		fmt.Printf("Mnc: %s\n", res.Mnc)
-		fmt.Printf("MobileBrand: %s\n", res.MobileBrand)
-		fmt.Printf("Elevation: %d\n", res.Elevation)
-		fmt.Printf("UsageType: %s\n", res.UsageType)
-		fmt.Printf("AddressType: %s\n", res.AddressType)
-		fmt.Printf("Category: %s\n", res.Category)
-		fmt.Printf("CategoryName: %s\n", res.CategoryName)
-		fmt.Printf("CreditsConsumed: %d\n", res.CreditsConsumed)
-		
-		// continent addon
-		fmt.Printf("Continent => Name: %s\n", res.Continent.Name)
-		fmt.Printf("Continent => Code: %s\n", res.Continent.Code)
-		fmt.Printf("Continent => Hemisphere: %+v\n", res.Continent.Hemisphere)
-		
-		// country addon
-		fmt.Printf("Country => Name: %s\n", res.Country.Name)
-		fmt.Printf("Country => Alpha3Code: %s\n", res.Country.Alpha3Code)
-		fmt.Printf("Country => NumericCode: %s\n", res.Country.NumericCode)
-		fmt.Printf("Country => Demonym: %s\n", res.Country.Demonym)
-		fmt.Printf("Country => Flag: %s\n", res.Country.Flag)
-		fmt.Printf("Country => Capital: %s\n", res.Country.Capital)
-		fmt.Printf("Country => TotalArea: %s\n", res.Country.TotalArea)
-		fmt.Printf("Country => Population: %s\n", res.Country.Population)
-		fmt.Printf("Country => IddCode: %s\n", res.Country.IddCode)
-		fmt.Printf("Country => Tld: %s\n", res.Country.Tld)
-		fmt.Printf("Country => IsEu: %t\n", res.Country.IsEu)
-		
-		fmt.Printf("Country => Currency => Code: %s\n", res.Country.Currency.Code)
-		fmt.Printf("Country => Currency => Name: %s\n", res.Country.Currency.Name)
-		fmt.Printf("Country => Currency => Symbol: %s\n", res.Country.Currency.Symbol)
-		
-		fmt.Printf("Country => Language => Code: %s\n", res.Country.Language.Code)
-		fmt.Printf("Country => Language => Name: %s\n", res.Country.Language.Name)
-		
-		// region addon
-		fmt.Printf("Region => Name: %s\n", res.Region.Name)
-		fmt.Printf("Region => Code: %s\n", res.Region.Code)
-		
-		// city addon
-		fmt.Printf("City => Name: %s\n", res.City.Name)
-		
-		// geotargeting addon
-		fmt.Printf("Geotargeting => Metro: %s\n", res.Geotargeting.Metro)
-		
-		// country_groupings addon
-		for i, s := range res.CountryGroupings {
-			fmt.Printf("CountryGroupings => #%d => Acronym: %s\n", i, s.Acronym)
-			fmt.Printf("CountryGroupings => #%d => Name: %s\n", i, s.Name)
-		}
-		
-		// time_zone_info addon
-		fmt.Printf("TimeZoneInfo => Olson: %s\n", res.TimeZoneInfo.Olson)
-		fmt.Printf("TimeZoneInfo => CurrentTime: %s\n", res.TimeZoneInfo.CurrentTime)
-		fmt.Printf("TimeZoneInfo => GmtOffset: %d\n", res.TimeZoneInfo.GmtOffset)
-		fmt.Printf("TimeZoneInfo => IsDst: %s\n", res.TimeZoneInfo.IsDst)
-		fmt.Printf("TimeZoneInfo => Sunrise: %s\n", res.TimeZoneInfo.Sunrise)
-		fmt.Printf("TimeZoneInfo => Sunset: %s\n", res.TimeZoneInfo.Sunset)
-	}
-
-	res2, err := ws.GetCredit()
-
-	if err != nil {
-		fmt.Print(err)
-		return
-	}
-	
-	fmt.Printf("Credit Balance: %d\n", res2.Response)
-}
-```
-
-## IPTOOLS CLASS
-
-## Methods
-Below are the methods supported in this package.
-
-|Method Name|Description|
-|---|---|
-|func (t *IPTools) IsIPv4(IP string) bool|Returns true if the string contains an IPv4 address; otherwise false.|
-|func (t *IPTools) IsIPv6(IP string) bool|Returns true if the string contains an IPv6 address; otherwise false.|
-|func (t *IPTools) IPv4ToDecimal(IP string) (*big.Int, error)|Returns the IP number for an IPv4 address.|
-|func (t *IPTools) IPv6ToDecimal(IP string) (*big.Int, error)|Returns the IP number for an IPv6 address.|
-|func (t *IPTools) DecimalToIPv4(IPNum *big.Int) (string, error)|Returns the IPv4 address for the supplied IP number.|
-|func (t *IPTools) DecimalToIPv6(IPNum *big.Int) (string, error)|Returns the IPv6 address for the supplied IP number.|
-|func (t *IPTools) CompressIPv6(IP string) (string, error)|Returns the IPv6 address in compressed form.|
-|func (t *IPTools) ExpandIPv6(IP string) (string, error)|Returns the IPv6 address in expanded form.|
-|func (t *IPTools) IPv4ToCIDR(IPFrom string, IPTo string) ([]string, error)|Returns a list of CIDRs from the supplied IPv4 range.|
-|func (t *IPTools) IPv6ToCIDR(IPFrom string, IPTo string) ([]string, error)|Returns a list of CIDRs from the supplied IPv6 range.|
-|func (t *IPTools) CIDRToIPv4(CIDR string) ([]string, error)|Returns the IPv4 range from the supplied CIDR.|
-|func (t *IPTools) CIDRToIPv6(CIDR string) ([]string, error)|Returns the IPv6 range from the supplied CIDR.|
-
-## Usage
-
-```go
-package main
-
-import (
-	"github.com/ip2location/ip2location-go/v9"
-	"fmt"
-	"math/big"
-)
-
-func main() {
-	t := ip2location.OpenTools()
-	
-	ip := "8.8.8.8"
-	res := t.IsIPv4(ip)
-	
-	fmt.Printf("Is IPv4: %t\n", res)
-	
-	ipnum, err := t.IPv4ToDecimal(ip)
-	if err != nil {
-		fmt.Print(err)
-	} else {
-		fmt.Printf("IPNum: %v\n", ipnum)
-	}
-
-	ip2 := "2600:1f18:45b0:5b00:f5d8:4183:7710:ceec"
-	res2 := t.IsIPv6(ip2)
-	
-	fmt.Printf("Is IPv6: %t\n", res2)
-
-	ipnum2, err := t.IPv6ToDecimal(ip2)
-	if err != nil {
-		fmt.Print(err)
-	} else {
-		fmt.Printf("IPNum: %v\n", ipnum2)
-	}
-	
-	ipnum3 := big.NewInt(42534)
-	res3, err := t.DecimalToIPv4(ipnum3)
-	
-	if err != nil {
-		fmt.Print(err)
-	} else {
-		fmt.Printf("IPv4: %v\n", res3)
-	}
-	
-	ipnum4, ok := big.NewInt(0).SetString("22398978840339333967292465152", 10)
-	if ok {
-		res4, err := t.DecimalToIPv6(ipnum4)
-		if err != nil {
-			fmt.Print(err)
-		} else {
-			fmt.Printf("IPv6: %v\n", res4)
-		}
-	}
-	
-	ip3 := "2600:1f18:045b:005b:f5d8:0:000:ceec"
-	res5, err := t.CompressIPv6(ip3)
-	
-	if err != nil {
-		fmt.Print(err)
-	} else {
-		fmt.Printf("Compressed: %v\n", res5)
-	}
-	
-	ip4 := "::45b:05b:f5d8:0:000:ceec"
-	res6, err := t.ExpandIPv6(ip4)
-	
-	if err != nil {
-		fmt.Print(err)
-	} else {
-		fmt.Printf("Expanded: %v\n", res6)
-	}
-	
-	res7, err := t.IPv4ToCIDR("10.0.0.0", "10.10.2.255")
-	
-	if err != nil {
-		fmt.Print(err)
-	} else {
-		for _, element := range res7 {
-			fmt.Println(element)
-		}
-	}
-	
-	res8, err := t.IPv6ToCIDR("2001:4860:4860:0000:0000:0000:0000:8888", "2001:4860:4860:0000:eeee:ffff:ffff:ffff")
-	
-	if err != nil {
-		fmt.Print(err)
-	} else {
-		for _, element := range res8 {
-			fmt.Println(element)
-		}
-	}
-	
-	res9, err := t.CIDRToIPv4("123.245.99.13/26")
-	
-	if err != nil {
-		fmt.Print(err)
-	} else {
-		fmt.Printf("IPv4 Range: %v\n", res9)
-	}
-	
-	res10, err := t.CIDRToIPv6("2002:1234::abcd:ffff:c0a8:101/62")
-	
-	if err != nil {
-		fmt.Print(err)
-	} else {
-		fmt.Printf("IPv6 Range: %v\n", res10)
-	}
-}
-```
-
-## COUNTRY CLASS
-
-## Methods
-Below are the methods supported in this package.
-
-|Method Name|Description|
-|---|---|
-|func OpenCountryInfo(csvFile string) (*CI, error)|Expects an IP2Location Country Information CSV file. This database is free to download at https://www.ip2location.com/free/country-information|
-|func (c *CI) GetCountryInfo(countryCode ...string) ([]CountryInfoRecord, error)|Returns the country information for specified country or all countries.|
-
-## Usage
-
-```go
-package main
-
-import (
-	"github.com/ip2location/ip2location-go"
-	"fmt"
-)
-
-func main() {
-	c, err := ip2location.OpenCountryInfo("./IP2LOCATION-COUNTRY-INFORMATION.CSV")
-
-	if err != nil {
-		fmt.Print(err)
-		return
-	}
-
-	res, err := c.GetCountryInfo("US")
-
-	if err != nil {
-		fmt.Print(err)
-		return
-	}
-
-	fmt.Printf("country_code: %s\n", res[0].Country_code)
-	fmt.Printf("country_name: %s\n", res[0].Country_name)
-	fmt.Printf("country_alpha3_code: %s\n", res[0].Country_alpha3_code)
-	fmt.Printf("country_numeric_code: %s\n", res[0].Country_numeric_code)
-	fmt.Printf("capital: %s\n", res[0].Capital)
-	fmt.Printf("country_demonym: %s\n", res[0].Country_demonym)
-	fmt.Printf("total_area: %s\n", res[0].Total_area)
-	fmt.Printf("population: %s\n", res[0].Population)
-	fmt.Printf("idd_code: %s\n", res[0].Idd_code)
-	fmt.Printf("currency_code: %s\n", res[0].Currency_code)
-	fmt.Printf("currency_name: %s\n", res[0].Currency_name)
-	fmt.Printf("currency_symbol: %s\n", res[0].Currency_symbol)
-	fmt.Printf("lang_code: %s\n", res[0].Lang_code)
-	fmt.Printf("lang_name: %s\n", res[0].Lang_name)
-	fmt.Printf("cctld: %s\n", res[0].Cctld)
-	fmt.Print("==============================================\n")
-
-	res2, err := c.GetCountryInfo()
-
-	if err != nil {
-		fmt.Print(err)
-		return
-	}
-
-	for _, v := range res2 {
-		fmt.Printf("country_code: %s\n", v.Country_code)
-		fmt.Printf("country_name: %s\n", v.Country_name)
-		fmt.Printf("country_alpha3_code: %s\n", v.Country_alpha3_code)
-		fmt.Printf("country_numeric_code: %s\n", v.Country_numeric_code)
-		fmt.Printf("capital: %s\n", v.Capital)
-		fmt.Printf("country_demonym: %s\n", v.Country_demonym)
-		fmt.Printf("total_area: %s\n", v.Total_area)
-		fmt.Printf("population: %s\n", v.Population)
-		fmt.Printf("idd_code: %s\n", v.Idd_code)
-		fmt.Printf("currency_code: %s\n", v.Currency_code)
-		fmt.Printf("currency_name: %s\n", v.Currency_name)
-		fmt.Printf("currency_symbol: %s\n", v.Currency_symbol)
-		fmt.Printf("lang_code: %s\n", v.Lang_code)
-		fmt.Printf("lang_name: %s\n", v.Lang_name)
-		fmt.Printf("cctld: %s\n", v.Cctld)
-		fmt.Print("==============================================\n")
-	}
-}
-```
-
-## REGION CLASS
-
-## Methods
-Below are the methods supported in this package.
-
-|Method Name|Description|
-|---|---|
-|func OpenRegionInfo(csvFile string) (*RI, error)|Expects an IP2Location ISO 3166-2 Subdivision Code CSV file. This database is free to download at https://www.ip2location.com/free/iso3166-2|
-|func (r *RI) GetRegionCode(countryCode string, regionName string) (string, error)|Returns the region code for the supplied country code and region name.|
-
-## Usage
-
-```go
-package main
-
-import (
-	"github.com/ip2location/ip2location-go"
-	"fmt"
-)
-
-func main() {
-	r, err := ip2location.OpenRegionInfo("./IP2LOCATION-ISO3166-2.CSV")
-
-	if err != nil {
-		fmt.Print(err)
-		return
-	}
-
-	res, err := r.GetRegionCode("US", "California")
-
-	if err != nil {
-		fmt.Print(err)
-		return
-	}
+Developer Documentation
+=====================
 
-	fmt.Printf("region code: %s\n", res)
-}
-```
\ No newline at end of file
+To learn more about installation, usage, and code examples, please visit the developer documentation at [https://ip2location-go.readthedocs.io/en/latest/](https://ip2location-go.readthedocs.io/en/latest/).
\ No newline at end of file
diff --git a/vendor/github.com/ip2location/ip2location-go/v9/ip2location.go b/vendor/github.com/ip2location/ip2location-go/v9/ip2location.go
index 4f4a6d53e527f8cab35ae4be15b6e707123e343c..417536a735b340b08bce84c05e88a854fd370929 100644
--- a/vendor/github.com/ip2location/ip2location-go/v9/ip2location.go
+++ b/vendor/github.com/ip2location/ip2location-go/v9/ip2location.go
@@ -159,7 +159,7 @@ var district_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
 var asn_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 24}
 var as_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25}
 
-const api_version string = "9.6.0"
+const api_version string = "9.7.0"
 
 var max_ipv4_range = uint128.From64(4294967295)
 var max_ipv6_range = uint128.From64(0)
@@ -580,6 +580,16 @@ func Api_version() string {
 	return api_version
 }
 
+// PackageVersion returns the database type (the package number of the BIN file).
+func (d *DB) PackageVersion() string {
+	return strconv.Itoa(int(d.meta.databasetype))
+}
+
+// DatabaseVersion returns the database version.
+func (d *DB) DatabaseVersion() string {
+	return "20" + strconv.Itoa(int(d.meta.databaseyear)) + "." + strconv.Itoa(int(d.meta.databasemonth)) + "." + strconv.Itoa(int(d.meta.databaseday))
+}
+
 // populate record with message
 func loadmessage(mesg string) IP2Locationrecord {
 	var x IP2Locationrecord
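The two accessors added above expose BIN metadata that `OpenDB` already parses. A minimal usage sketch (the BIN filename is illustrative, as in the README examples):

```go
package main

import (
	"fmt"

	"github.com/ip2location/ip2location-go/v9"
)

func main() {
	// The BIN filename is illustrative; use whichever IP2Location database you have.
	db, err := ip2location.OpenDB("./IP2LOCATION-LITE-DB1.BIN")
	if err != nil {
		fmt.Print(err)
		return
	}
	defer db.Close()

	// New in 9.7.0: the package (database type) and release date of the opened BIN file.
	fmt.Printf("package version: %s\n", db.PackageVersion())
	fmt.Printf("database version: %s\n", db.DatabaseVersion())
	fmt.Printf("api version: %s\n", ip2location.Api_version())
}
```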
diff --git a/vendor/github.com/ip2location/ip2location-go/v9/iptools.go b/vendor/github.com/ip2location/ip2location-go/v9/iptools.go
index 5b9786f56d9a855383020470b567bbb1b821adc5..9808698a3f374f818e075065b00092d5d7b25fa9 100644
--- a/vendor/github.com/ip2location/ip2location-go/v9/iptools.go
+++ b/vendor/github.com/ip2location/ip2location-go/v9/iptools.go
@@ -448,31 +448,43 @@ func (t *IPTools) CIDRToIPv6(CIDR string) ([]string, error) {
 		return nil, errors.New("Not a valid CIDR.")
 	}
 
-	hexstartaddress, _ := t.ExpandIPv6(ip)
-	hexstartaddress = strings.ReplaceAll(hexstartaddress, ":", "")
-	hexendaddress := hexstartaddress
-
-	bits := 128 - prefix
-	pos := 31
-
-	for bits > 0 {
-		values := []int{4, bits}
-		min, _ := t.minMax(values)
-		x, _ := strconv.ParseInt(string(hexendaddress[pos]), 16, 64)
-		y := fmt.Sprintf("%x", (x | int64(math.Pow(2, float64(min))-1)))
+	expand, _ := t.ExpandIPv6(ip)
+	parts := strings.Split(expand, ":")
+
+	bitStart := strings.Repeat("1", prefix) + strings.Repeat("0", 128-prefix)
+	bitEnd := strings.Repeat("0", prefix) + strings.Repeat("1", 128-prefix)
+
+	n := 16 // split string into 16-char parts
+	floors := []string{}
+	for i := 0; i < len(bitStart); i += n {
+		end := i + n
+		if end > len(bitStart) {
+			end = len(bitStart)
+		}
+		floors = append(floors, bitStart[i:end])
+	}
+	ceilings := []string{}
+	for i := 0; i < len(bitEnd); i += n {
+		end := i + n
+		if end > len(bitEnd) {
+			end = len(bitEnd)
+		}
+		ceilings = append(ceilings, bitEnd[i:end])
+	}
 
-		hexendaddress = hexendaddress[:pos] + y + hexendaddress[pos+1:]
+	start := []string{}
+	end := []string{}
 
-		bits = bits - 4
-		pos = pos - 1
+	for i := 0; i < 8; i += 1 {
+		p, _ := strconv.ParseUint(parts[i], 16, 64)
+		f, _ := strconv.ParseUint(floors[i], 2, 64)
+		c, _ := strconv.ParseUint(ceilings[i], 2, 64)
+		start = append(start, strconv.FormatUint(p&f, 16))
+		end = append(end, strconv.FormatUint(p|c, 16))
 	}
 
-	re2 := regexp.MustCompile(`(.{4})`)
-	hexstartaddress = re2.ReplaceAllString(hexstartaddress, "$1:")
-	hexstartaddress = strings.TrimSuffix(hexstartaddress, ":")
-	hexendaddress = re2.ReplaceAllString(hexendaddress, "$1:")
-	hexendaddress = strings.TrimSuffix(hexendaddress, ":")
-
+	hexstartaddress, _ := t.ExpandIPv6(strings.Join(start, ":"))
+	hexendaddress, _ := t.ExpandIPv6(strings.Join(end, ":"))
 	result := []string{hexstartaddress, hexendaddress}
 
 	return result, nil
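The rewritten `CIDRToIPv6` computes the range arithmetically instead of patching hex digits in place: each 16-bit group of the expanded address is ANDed with the prefix mask to obtain the range start and ORed with the inverted mask to obtain the range end. A self-contained sketch of that per-group arithmetic (the sample address and prefix length are illustrative):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// maskRange mirrors the per-group arithmetic of the rewritten CIDRToIPv6:
// range start = group AND prefix mask, range end = group OR inverted mask.
func maskRange(groups []string, prefix int) (start, end []string) {
	ones := strings.Repeat("1", prefix) + strings.Repeat("0", 128-prefix)
	zeros := strings.Repeat("0", prefix) + strings.Repeat("1", 128-prefix)
	for i := 0; i < 8; i++ {
		p, _ := strconv.ParseUint(groups[i], 16, 64)
		f, _ := strconv.ParseUint(ones[i*16:(i+1)*16], 2, 64)
		c, _ := strconv.ParseUint(zeros[i*16:(i+1)*16], 2, 64)
		start = append(start, strconv.FormatUint(p&f, 16))
		end = append(end, strconv.FormatUint(p|c, 16))
	}
	return start, end
}

func main() {
	// 2001:4860:4860::8888 expanded into its eight 16-bit groups, taken as a /64.
	groups := []string{"2001", "4860", "4860", "0000", "0000", "0000", "0000", "8888"}
	start, end := maskRange(groups, 64)
	fmt.Println(strings.Join(start, ":")) // 2001:4860:4860:0:0:0:0:0
	fmt.Println(strings.Join(end, ":"))   // 2001:4860:4860:0:ffff:ffff:ffff:ffff
}
```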
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index dde75389f1396a6996b0cc89a23d27cbc1baa879..7e83f583c00af9a20e6dbb2f222ef6e3c8b7d8ad 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -16,6 +16,22 @@ This package provides various compression algorithms.
 
 # changelog
 
+* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2)
+	* zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876
+
+* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1)
+	* s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871
+	* flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869
+	* s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867
+
+* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0)
+	* Add experimental dictionary builder  https://github.com/klauspost/compress/pull/853
+	* Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838
+	* flate: Add limited window compression https://github.com/klauspost/compress/pull/843
+	* s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839
+	* flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837
+	* gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860
+   
 * July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7)
 	* zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829
 	* s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832
@@ -646,6 +662,7 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv
 * [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
 * [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
 * [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index.
+* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor.
 
 # license
 
diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go
index 414c0bea9fa91db8c9d022b314a3463e95f4e948..2f410d64f5affc2cfd83e67470c5a84c6fa8f0ed 100644
--- a/vendor/github.com/klauspost/compress/flate/inflate.go
+++ b/vendor/github.com/klauspost/compress/flate/inflate.go
@@ -120,8 +120,9 @@ func (h *huffmanDecoder) init(lengths []int) bool {
 	const sanity = false
 
 	if h.chunks == nil {
-		h.chunks = &[huffmanNumChunks]uint16{}
+		h.chunks = new([huffmanNumChunks]uint16)
 	}
+
 	if h.maxRead != 0 {
 		*h = huffmanDecoder{chunks: h.chunks, links: h.links}
 	}
@@ -175,6 +176,7 @@ func (h *huffmanDecoder) init(lengths []int) bool {
 	}
 
 	h.maxRead = min
+
 	chunks := h.chunks[:]
 	for i := range chunks {
 		chunks[i] = 0
@@ -202,8 +204,7 @@ func (h *huffmanDecoder) init(lengths []int) bool {
 			if cap(h.links[off]) < numLinks {
 				h.links[off] = make([]uint16, numLinks)
 			} else {
-				links := h.links[off][:0]
-				h.links[off] = links[:numLinks]
+				h.links[off] = h.links[off][:numLinks]
 			}
 		}
 	} else {
@@ -277,7 +278,7 @@ func (h *huffmanDecoder) init(lengths []int) bool {
 	return true
 }
 
-// The actual read interface needed by NewReader.
+// Reader is the actual read interface needed by NewReader.
 // If the passed in io.Reader does not also have ReadByte,
 // the NewReader will introduce its own buffering.
 type Reader interface {
@@ -285,6 +286,18 @@ type Reader interface {
 	io.ByteReader
 }
 
+type step uint8
+
+const (
+	copyData step = iota + 1
+	nextBlock
+	huffmanBytesBuffer
+	huffmanBytesReader
+	huffmanBufioReader
+	huffmanStringsReader
+	huffmanGenericReader
+)
+
 // Decompress state.
 type decompressor struct {
 	// Input source.
@@ -303,7 +316,7 @@ type decompressor struct {
 
 	// Next step in the decompression,
 	// and decompression state.
-	step      func(*decompressor)
+	step      step
 	stepState int
 	err       error
 	toRead    []byte
@@ -342,7 +355,7 @@ func (f *decompressor) nextBlock() {
 		// compressed, fixed Huffman tables
 		f.hl = &fixedHuffmanDecoder
 		f.hd = nil
-		f.huffmanBlockDecoder()()
+		f.huffmanBlockDecoder()
 		if debugDecode {
 			fmt.Println("predefinied huffman block")
 		}
@@ -353,7 +366,7 @@ func (f *decompressor) nextBlock() {
 		}
 		f.hl = &f.h1
 		f.hd = &f.h2
-		f.huffmanBlockDecoder()()
+		f.huffmanBlockDecoder()
 		if debugDecode {
 			fmt.Println("dynamic huffman block")
 		}
@@ -379,14 +392,16 @@ func (f *decompressor) Read(b []byte) (int, error) {
 		if f.err != nil {
 			return 0, f.err
 		}
-		f.step(f)
+
+		f.doStep()
+
 		if f.err != nil && len(f.toRead) == 0 {
 			f.toRead = f.dict.readFlush() // Flush what's left in case of error
 		}
 	}
 }
 
-// Support the io.WriteTo interface for io.Copy and friends.
+// WriteTo implements the io.WriteTo interface for io.Copy and friends.
 func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
 	total := int64(0)
 	flushed := false
@@ -410,7 +425,7 @@ func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
 			return total, f.err
 		}
 		if f.err == nil {
-			f.step(f)
+			f.doStep()
 		}
 		if len(f.toRead) == 0 && f.err != nil && !flushed {
 			f.toRead = f.dict.readFlush() // Flush what's left in case of error
@@ -631,7 +646,7 @@ func (f *decompressor) copyData() {
 
 	if f.dict.availWrite() == 0 || f.copyLen > 0 {
 		f.toRead = f.dict.readFlush()
-		f.step = (*decompressor).copyData
+		f.step = copyData
 		return
 	}
 	f.finishBlock()
@@ -644,7 +659,28 @@ func (f *decompressor) finishBlock() {
 		}
 		f.err = io.EOF
 	}
-	f.step = (*decompressor).nextBlock
+	f.step = nextBlock
+}
+
+func (f *decompressor) doStep() {
+	switch f.step {
+	case copyData:
+		f.copyData()
+	case nextBlock:
+		f.nextBlock()
+	case huffmanBytesBuffer:
+		f.huffmanBytesBuffer()
+	case huffmanBytesReader:
+		f.huffmanBytesReader()
+	case huffmanBufioReader:
+		f.huffmanBufioReader()
+	case huffmanStringsReader:
+		f.huffmanStringsReader()
+	case huffmanGenericReader:
+		f.huffmanGenericReader()
+	default:
+		panic("BUG: unexpected step state")
+	}
 }
 
 // noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF.
@@ -747,7 +783,7 @@ func (f *decompressor) Reset(r io.Reader, dict []byte) error {
 		h1:       f.h1,
 		h2:       f.h2,
 		dict:     f.dict,
-		step:     (*decompressor).nextBlock,
+		step:     nextBlock,
 	}
 	f.dict.init(maxMatchOffset, dict)
 	return nil
@@ -768,7 +804,7 @@ func NewReader(r io.Reader) io.ReadCloser {
 	f.r = makeReader(r)
 	f.bits = new([maxNumLit + maxNumDist]int)
 	f.codebits = new([numCodes]int)
-	f.step = (*decompressor).nextBlock
+	f.step = nextBlock
 	f.dict.init(maxMatchOffset, nil)
 	return &f
 }
@@ -787,7 +823,7 @@ func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
 	f.r = makeReader(r)
 	f.bits = new([maxNumLit + maxNumDist]int)
 	f.codebits = new([numCodes]int)
-	f.step = (*decompressor).nextBlock
+	f.step = nextBlock
 	f.dict.init(maxMatchOffset, dict)
 	return &f
 }
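The inflate changes above replace the decompressor's `step func(*decompressor)` field with a small `step` enum dispatched through `doStep`, so the resume point is stored as plain data rather than a method value. A minimal sketch of the pattern (all names local to this sketch):

```go
package main

import "fmt"

type step uint8

const (
	stepRead step = iota + 1
	stepDone
)

// machine stores its next state as a small integer tag; a single doStep
// switch dispatches to the corresponding method, mirroring the refactor.
type machine struct {
	step step
	n    int
}

func (m *machine) read() {
	m.n++
	if m.n == 3 {
		m.step = stepDone
		return
	}
	m.step = stepRead
}

func (m *machine) doStep() bool {
	switch m.step {
	case stepRead:
		m.read()
		return true
	case stepDone:
		return false
	default:
		panic("BUG: unexpected step state")
	}
}

func main() {
	m := &machine{step: stepRead}
	for m.doStep() {
	}
	fmt.Println("steps run:", m.n) // steps run: 3
}
```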
diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
index 61342b6b88fbd267ee5c0f628fbdec6b5cda2f10..2b2f993f753746444ad92ea4dee2559c7f4728be 100644
--- a/vendor/github.com/klauspost/compress/flate/inflate_gen.go
+++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
@@ -85,7 +85,7 @@ readLiteral:
 			dict.writeByte(byte(v))
 			if dict.availWrite() == 0 {
 				f.toRead = dict.readFlush()
-				f.step = (*decompressor).huffmanBytesBuffer
+				f.step = huffmanBytesBuffer
 				f.stepState = stateInit
 				f.b, f.nb = fb, fnb
 				return
@@ -251,7 +251,7 @@ copyHistory:
 
 		if dict.availWrite() == 0 || f.copyLen > 0 {
 			f.toRead = dict.readFlush()
-			f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work
+			f.step = huffmanBytesBuffer // We need to continue this work
 			f.stepState = stateDict
 			f.b, f.nb = fb, fnb
 			return
@@ -336,7 +336,7 @@ readLiteral:
 			dict.writeByte(byte(v))
 			if dict.availWrite() == 0 {
 				f.toRead = dict.readFlush()
-				f.step = (*decompressor).huffmanBytesReader
+				f.step = huffmanBytesReader
 				f.stepState = stateInit
 				f.b, f.nb = fb, fnb
 				return
@@ -502,7 +502,7 @@ copyHistory:
 
 		if dict.availWrite() == 0 || f.copyLen > 0 {
 			f.toRead = dict.readFlush()
-			f.step = (*decompressor).huffmanBytesReader // We need to continue this work
+			f.step = huffmanBytesReader // We need to continue this work
 			f.stepState = stateDict
 			f.b, f.nb = fb, fnb
 			return
@@ -587,7 +587,7 @@ readLiteral:
 			dict.writeByte(byte(v))
 			if dict.availWrite() == 0 {
 				f.toRead = dict.readFlush()
-				f.step = (*decompressor).huffmanBufioReader
+				f.step = huffmanBufioReader
 				f.stepState = stateInit
 				f.b, f.nb = fb, fnb
 				return
@@ -753,7 +753,7 @@ copyHistory:
 
 		if dict.availWrite() == 0 || f.copyLen > 0 {
 			f.toRead = dict.readFlush()
-			f.step = (*decompressor).huffmanBufioReader // We need to continue this work
+			f.step = huffmanBufioReader // We need to continue this work
 			f.stepState = stateDict
 			f.b, f.nb = fb, fnb
 			return
@@ -838,7 +838,7 @@ readLiteral:
 			dict.writeByte(byte(v))
 			if dict.availWrite() == 0 {
 				f.toRead = dict.readFlush()
-				f.step = (*decompressor).huffmanStringsReader
+				f.step = huffmanStringsReader
 				f.stepState = stateInit
 				f.b, f.nb = fb, fnb
 				return
@@ -1004,7 +1004,7 @@ copyHistory:
 
 		if dict.availWrite() == 0 || f.copyLen > 0 {
 			f.toRead = dict.readFlush()
-			f.step = (*decompressor).huffmanStringsReader // We need to continue this work
+			f.step = huffmanStringsReader // We need to continue this work
 			f.stepState = stateDict
 			f.b, f.nb = fb, fnb
 			return
@@ -1089,7 +1089,7 @@ readLiteral:
 			dict.writeByte(byte(v))
 			if dict.availWrite() == 0 {
 				f.toRead = dict.readFlush()
-				f.step = (*decompressor).huffmanGenericReader
+				f.step = huffmanGenericReader
 				f.stepState = stateInit
 				f.b, f.nb = fb, fnb
 				return
@@ -1255,7 +1255,7 @@ copyHistory:
 
 		if dict.availWrite() == 0 || f.copyLen > 0 {
 			f.toRead = dict.readFlush()
-			f.step = (*decompressor).huffmanGenericReader // We need to continue this work
+			f.step = huffmanGenericReader // We need to continue this work
 			f.stepState = stateDict
 			f.b, f.nb = fb, fnb
 			return
@@ -1265,19 +1265,19 @@ copyHistory:
 	// Not reached
 }
 
-func (f *decompressor) huffmanBlockDecoder() func() {
+func (f *decompressor) huffmanBlockDecoder() {
 	switch f.r.(type) {
 	case *bytes.Buffer:
-		return f.huffmanBytesBuffer
+		f.huffmanBytesBuffer()
 	case *bytes.Reader:
-		return f.huffmanBytesReader
+		f.huffmanBytesReader()
 	case *bufio.Reader:
-		return f.huffmanBufioReader
+		f.huffmanBufioReader()
 	case *strings.Reader:
-		return f.huffmanStringsReader
+		f.huffmanStringsReader()
 	case Reader:
-		return f.huffmanGenericReader
+		f.huffmanGenericReader()
 	default:
-		return f.huffmanGenericReader
+		f.huffmanGenericReader()
 	}
 }
diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go
index 65d777357aac2dc5273dc1251b8fa2ed44a57ccb..074018d8f94ccf146426ca1da0256c839700b316 100644
--- a/vendor/github.com/klauspost/compress/fse/compress.go
+++ b/vendor/github.com/klauspost/compress/fse/compress.go
@@ -212,7 +212,7 @@ func (s *Scratch) writeCount() error {
 		previous0 bool
 		charnum   uint16
 
-		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
+		maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3
 
 		// Write Table Size
 		bitStream = uint32(tableLog - minTablelog)
diff --git a/vendor/github.com/klauspost/compress/gzip/gunzip.go b/vendor/github.com/klauspost/compress/gzip/gunzip.go
index dc2362a63bf81411218213775719f5fad839ef52..00a0a2c386982223c80737f00aeab16572fe5159 100644
--- a/vendor/github.com/klauspost/compress/gzip/gunzip.go
+++ b/vendor/github.com/klauspost/compress/gzip/gunzip.go
@@ -238,6 +238,11 @@ func (z *Reader) readHeader() (hdr Header, err error) {
 		}
 	}
 
+	// Reserved FLG bits must be zero.
+	if flg>>5 != 0 {
+		return hdr, ErrHeader
+	}
+
 	z.digest = 0
 	if z.decompressor == nil {
 		z.decompressor = flate.NewReader(z.r)
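The new check enforces RFC 1952, which reserves FLG bits 5-7 and requires them to be zero; `flg>>5 != 0` rejects any header that sets them. A tiny standalone illustration of the predicate:

```go
package main

import "fmt"

// flgValid mirrors the new header check in gunzip.go: per RFC 1952 the top
// three FLG bits (5-7) are reserved and must be zero.
func flgValid(flg byte) bool {
	return flg>>5 == 0
}

func main() {
	fmt.Println(flgValid(0x08)) // true  (only FNAME, bit 3, is set)
	fmt.Println(flgValid(0x20)) // false (reserved bit 5 is set)
}
```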
diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go
deleted file mode 100644
index 4dcab8d232775c0f46e348837e70df7f7bcc847f..0000000000000000000000000000000000000000
--- a/vendor/github.com/klauspost/compress/huff0/bytereader.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2018 Klaus Post. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
-
-package huff0
-
-// byteReader provides a byte reader that reads
-// little endian values from a byte stream.
-// The input stream is manually advanced.
-// The reader performs no bounds checks.
-type byteReader struct {
-	b   []byte
-	off int
-}
-
-// init will initialize the reader and set the input.
-func (b *byteReader) init(in []byte) {
-	b.b = in
-	b.off = 0
-}
-
-// Int32 returns a little endian int32 starting at current offset.
-func (b byteReader) Int32() int32 {
-	v3 := int32(b.b[b.off+3])
-	v2 := int32(b.b[b.off+2])
-	v1 := int32(b.b[b.off+1])
-	v0 := int32(b.b[b.off])
-	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
-}
-
-// Uint32 returns a little endian uint32 starting at current offset.
-func (b byteReader) Uint32() uint32 {
-	v3 := uint32(b.b[b.off+3])
-	v2 := uint32(b.b[b.off+2])
-	v1 := uint32(b.b[b.off+1])
-	v0 := uint32(b.b[b.off])
-	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
-}
-
-// remain will return the number of bytes remaining.
-func (b byteReader) remain() int {
-	return len(b.b) - b.off
-}
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
index 518436cf3d44b0d7e8a54e72073c3076e6f8d61d..84aa3d12f0045b7e4c32d1833888d8c33d273a93 100644
--- a/vendor/github.com/klauspost/compress/huff0/compress.go
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -350,6 +350,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
 // Does not update s.clearCount.
 func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
 	reuse = true
+	_ = s.count // Assert that s != nil to speed up the following loop.
 	for _, v := range in {
 		s.count[v]++
 	}
@@ -415,7 +416,7 @@ func (s *Scratch) validateTable(c cTable) bool {
 
 // minTableLog provides the minimum logSize to safely represent a distribution.
 func (s *Scratch) minTableLog() uint8 {
-	minBitsSrc := highBit32(uint32(s.br.remain())) + 1
+	minBitsSrc := highBit32(uint32(s.srcLen)) + 1
 	minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2
 	if minBitsSrc < minBitsSymbols {
 		return uint8(minBitsSrc)
@@ -427,7 +428,7 @@ func (s *Scratch) minTableLog() uint8 {
 func (s *Scratch) optimalTableLog() {
 	tableLog := s.TableLog
 	minBits := s.minTableLog()
-	maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1
+	maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1
 	if maxBitsSrc < tableLog {
 		// Accuracy can be reduced
 		tableLog = maxBitsSrc
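The `_ = s.count` line added to `countSimple` is a nil-check-hoisting idiom: touching the field once before the loop lets the compiler prove `s` is non-nil and drop the per-iteration check. A minimal illustration of the same trick:

```go
package main

import "fmt"

type scratch struct {
	count [256]uint32
}

// histogram touches s.count once up front so the compiler can hoist the
// nil check on s out of the hot loop, as countSimple now does.
func histogram(s *scratch, in []byte) {
	_ = s.count
	for _, v := range in {
		s.count[v]++
	}
}

func main() {
	s := &scratch{}
	histogram(s, []byte("abracadabra"))
	fmt.Println(s.count['a']) // 5
}
```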
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
index e8ad17ad08ef1763d193a65d65898ff9a5ffcb39..77ecd68e0a7bc2cb0e3f6e0ea808ee48f38d417a 100644
--- a/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -88,7 +88,7 @@ type Scratch struct {
 	// Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded.
 	MaxDecodedSize int
 
-	br byteReader
+	srcLen int
 
 	// MaxSymbolValue will override the maximum symbol value of the next block.
 	MaxSymbolValue uint8
@@ -170,7 +170,7 @@ func (s *Scratch) prepare(in []byte) (*Scratch, error) {
 	if s.fse == nil {
 		s.fse = &fse.Scratch{}
 	}
-	s.br.init(in)
+	s.srcLen = len(in)
 
 	return s, nil
 }
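Since only the input's length was ever read from the old `byteReader`, `Scratch` now records `srcLen` directly and the table-log bounds compute from it. A sketch of `minTableLog`'s arithmetic using `math/bits` (the package's `highBit32(v)` equals `bits.Len32(v) - 1`):

```go
package main

import (
	"fmt"
	"math/bits"
)

// minTableLog mirrors huff0's lower bound after the refactor: the table log
// must cover both the source length and the symbol count.
func minTableLog(srcLen int, symbolLen uint16) uint8 {
	minBitsSrc := bits.Len32(uint32(srcLen))              // highBit32(srcLen) + 1
	minBitsSymbols := bits.Len32(uint32(symbolLen)-1) + 1 // highBit32(symbolLen-1) + 2
	if minBitsSrc < minBitsSymbols {
		return uint8(minBitsSrc)
	}
	return uint8(minBitsSymbols)
}

func main() {
	// 4 KiB of input over 40 distinct symbols: the symbol bound wins.
	fmt.Println(minTableLog(4096, 40)) // 7
}
```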
diff --git a/vendor/github.com/klauspost/compress/s2/encode.go b/vendor/github.com/klauspost/compress/s2/encode.go
index e6c2310212f273e7bf7fab4b710d6f02cda978bc..0c9088adfee0cec0bf9b348bf75bb97883a3a9c3 100644
--- a/vendor/github.com/klauspost/compress/s2/encode.go
+++ b/vendor/github.com/klauspost/compress/s2/encode.go
@@ -57,7 +57,7 @@ func Encode(dst, src []byte) []byte {
 // The function returns -1 if no improvement could be achieved.
 // Using actual compression will most often produce better compression than the estimate.
 func EstimateBlockSize(src []byte) (d int) {
-	if len(src) < 6 || int64(len(src)) > 0xffffffff {
+	if len(src) <= inputMargin || int64(len(src)) > 0xffffffff {
 		return -1
 	}
 	if len(src) <= 1024 {
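`EstimateBlockSize` now declines inputs at or below the encoder's `inputMargin` instead of only those shorter than 6 bytes, matching the `> inputMargin` preconditions documented on `calcBlockSize` below. A short usage sketch (sizes are illustrative):

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	// Highly compressible input: the estimate should be well under len(src).
	src := make([]byte, 4096)
	if est := s2.EstimateBlockSize(src); est >= 0 {
		fmt.Println("estimated compressed size:", est)
	}

	// Tiny inputs report -1: no improvement can be achieved.
	fmt.Println(s2.EstimateBlockSize(make([]byte, 4))) // -1
}
```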
diff --git a/vendor/github.com/klauspost/compress/s2/encode_best.go b/vendor/github.com/klauspost/compress/s2/encode_best.go
index 1d13e869a111536f0b52d12d11357974c1e5c996..47bac74234b0f204e9d6554ada035db48027039b 100644
--- a/vendor/github.com/klauspost/compress/s2/encode_best.go
+++ b/vendor/github.com/klauspost/compress/s2/encode_best.go
@@ -157,6 +157,9 @@ func encodeBlockBest(dst, src []byte, dict *Dict) (d int) {
 				return m
 			}
 			matchDict := func(candidate, s int, first uint32, rep bool) match {
+				if s >= MaxDictSrcOffset {
+					return match{offset: candidate, s: s}
+				}
 				// Calculate offset as if in continuous array with s
 				offset := -len(dict.dict) + candidate
 				if best.length != 0 && best.s-best.offset == s-offset && !rep {
diff --git a/vendor/github.com/klauspost/compress/s2/encode_go.go b/vendor/github.com/klauspost/compress/s2/encode_go.go
index 0d39c7b0e0134fe412e211c17f11cc36b7117c84..6b393c34d376cc4c61da4eb92990688b43e3e037 100644
--- a/vendor/github.com/klauspost/compress/s2/encode_go.go
+++ b/vendor/github.com/klauspost/compress/s2/encode_go.go
@@ -316,6 +316,7 @@ func matchLen(a []byte, b []byte) int {
 	return len(a) + checked
 }
 
+// input must be > inputMargin
 func calcBlockSize(src []byte) (d int) {
 	// Initialize the hash table.
 	const (
@@ -501,6 +502,7 @@ emitRemainder:
 	return d
 }
 
+// length must be > inputMargin.
 func calcBlockSizeSmall(src []byte) (d int) {
 	// Initialize the hash table.
 	const (
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index bdd49c8b25d2da1f10eb86b4a5494a6edc6e905d..92e2347bbc0ea2e63a362417b639418462614d66 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -259,7 +259,7 @@ nyc-taxi-data-10M.csv   gzkp    1   3325605752  922273214   13929   227.68
 
 ## Decompressor
 
-Staus: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
 
 This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
 kindly supplied by [fuzzit.dev](https://fuzzit.dev/). 
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go
index 9819d414536b17241319c1ed49c145aaee58d32b..c81a15357af60832f03b273a0ee7c2195ec9e254 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_best.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -43,7 +43,7 @@ func (m *match) estBits(bitsPerByte int32) {
 	if m.rep < 0 {
 		ofc = ofCode(uint32(m.s-m.offset) + 3)
 	} else {
-		ofc = ofCode(uint32(m.rep))
+		ofc = ofCode(uint32(m.rep) & 3)
 	}
 	// Cost, excluding
 	ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc]
@@ -197,12 +197,13 @@ encodeLoop:
 
 		// Set m to a match at offset if it looks like that will improve compression.
 		improve := func(m *match, offset int32, s int32, first uint32, rep int32) {
-			if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
+			delta := s - offset
+			if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first {
 				return
 			}
 			if debugAsserts {
-				if offset <= 0 {
-					panic(offset)
+				if offset >= s {
+					panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff))
 				}
 				if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
 					panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
@@ -226,7 +227,7 @@ encodeLoop:
 				}
 			}
 			l := 4 + e.matchlen(s+4, offset+4, src)
-			if rep < 0 {
+			if true {
 				// Extend candidate match backwards as far as possible.
 				tMin := s - e.maxMatchOff
 				if tMin < 0 {
@@ -281,6 +282,7 @@ encodeLoop:
 		// Load next and check...
 		e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset}
 		e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset}
+		index0 := s + 1
 
 		// Look far ahead, unless we have a really long match already...
 		if best.length < goodEnough {
@@ -343,8 +345,8 @@ encodeLoop:
 		if best.rep > 0 {
 			var seq seq
 			seq.matchLen = uint32(best.length - zstdMinMatch)
-			if debugAsserts && s <= nextEmit {
-				panic("s <= nextEmit")
+			if debugAsserts && s < nextEmit {
+				panic("s < nextEmit")
 			}
 			addLiterals(&seq, best.s)
 
@@ -356,19 +358,16 @@ encodeLoop:
 			blk.sequences = append(blk.sequences, seq)
 
 			// Index old s + 1 -> s - 1
-			index0 := s + 1
 			s = best.s + best.length
-
 			nextEmit = s
-			if s >= sLimit {
-				if debugEncoder {
-					println("repeat ended", s, best.length)
-				}
-				break encodeLoop
-			}
+
 			// Index skipped...
+			end := s
+			if s > sLimit+4 {
+				end = sLimit + 4
+			}
 			off := index0 + e.cur
-			for index0 < s {
+			for index0 < end {
 				cv0 := load6432(src, index0)
 				h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
 				h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
@@ -377,6 +376,7 @@ encodeLoop:
 				off++
 				index0++
 			}
+
 			switch best.rep {
 			case 2, 4 | 1:
 				offset1, offset2 = offset2, offset1
@@ -385,12 +385,17 @@ encodeLoop:
 			case 4 | 3:
 				offset1, offset2, offset3 = offset1-1, offset1, offset2
 			}
+			if s >= sLimit {
+				if debugEncoder {
+					println("repeat ended", s, best.length)
+				}
+				break encodeLoop
+			}
 			continue
 		}
 
 		// A 4-byte match has been found. Update recent offsets.
 		// We'll later see if more than 4 bytes.
-		index0 := s + 1
 		s = best.s
 		t := best.offset
 		offset1, offset2, offset3 = s-t, offset1, offset2
@@ -418,19 +423,25 @@ encodeLoop:
 		}
 		blk.sequences = append(blk.sequences, seq)
 		nextEmit = s
-		if s >= sLimit {
-			break encodeLoop
+
+		// Index old s + 1 -> s - 1 or sLimit
+		end := s
+		if s > sLimit-4 {
+			end = sLimit - 4
 		}
 
-		// Index old s + 1 -> s - 1
-		for index0 < s {
+		off := index0 + e.cur
+		for index0 < end {
 			cv0 := load6432(src, index0)
 			h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
 			h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
-			off := index0 + e.cur
 			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
 			e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
 			index0++
+			off++
+		}
+		if s >= sLimit {
+			break encodeLoop
 		}
 	}
 
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
index 8582f31a7cc4c3f252be1ee149980c4ae7d0785e..20d25b0e0523e9c8ee426e45495d8d80977aab46 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_better.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -145,7 +145,7 @@ encodeLoop:
 		var t int32
 		// We allow the encoder to optionally turn off repeat offsets across blocks
 		canRepeat := len(blk.sequences) > 2
-		var matched int32
+		var matched, index0 int32
 
 		for {
 			if debugAsserts && canRepeat && offset1 == 0 {
@@ -162,6 +162,7 @@ encodeLoop:
 			off := s + e.cur
 			e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
 			e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
+			index0 = s + 1
 
 			if canRepeat {
 				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
@@ -258,7 +259,6 @@ encodeLoop:
 					}
 					blk.sequences = append(blk.sequences, seq)
 
-					index0 := s + repOff2
 					s += lenght + repOff2
 					nextEmit = s
 					if s >= sLimit {
@@ -498,15 +498,15 @@ encodeLoop:
 		}
 
 		// Index match start+1 (long) -> s - 1
-		index0 := s - l + 1
+		off := index0 + e.cur
 		for index0 < s-1 {
 			cv0 := load6432(src, index0)
 			cv1 := cv0 >> 8
 			h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
-			off := index0 + e.cur
 			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
 			e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
 			index0 += 2
+			off += 2
 		}
 
 		cv = load6432(src, s)
@@ -672,7 +672,7 @@ encodeLoop:
 		var t int32
 		// We allow the encoder to optionally turn off repeat offsets across blocks
 		canRepeat := len(blk.sequences) > 2
-		var matched int32
+		var matched, index0 int32
 
 		for {
 			if debugAsserts && canRepeat && offset1 == 0 {
@@ -691,6 +691,7 @@ encodeLoop:
 			e.markLongShardDirty(nextHashL)
 			e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
 			e.markShortShardDirty(nextHashS)
+			index0 = s + 1
 
 			if canRepeat {
 				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
@@ -726,7 +727,6 @@ encodeLoop:
 					blk.sequences = append(blk.sequences, seq)
 
 					// Index match start+1 (long) -> s - 1
-					index0 := s + repOff
 					s += lenght + repOff
 
 					nextEmit = s
@@ -790,7 +790,6 @@ encodeLoop:
 					}
 					blk.sequences = append(blk.sequences, seq)
 
-					index0 := s + repOff2
 					s += lenght + repOff2
 					nextEmit = s
 					if s >= sLimit {
@@ -1024,18 +1023,18 @@ encodeLoop:
 		}
 
 		// Index match start+1 (long) -> s - 1
-		index0 := s - l + 1
+		off := index0 + e.cur
 		for index0 < s-1 {
 			cv0 := load6432(src, index0)
 			cv1 := cv0 >> 8
 			h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
-			off := index0 + e.cur
 			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
 			e.markLongShardDirty(h0)
 			h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
 			e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
 			e.markShortShardDirty(h1)
 			index0 += 2
+			off += 2
 		}
 
 		cv = load6432(src, s)
diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md
index accd7abaf990a33048482a8a2227538de882ea9d..30f8d2963e213f1b4523e83a956b4bd3aa77103b 100644
--- a/vendor/github.com/klauspost/cpuid/v2/README.md
+++ b/vendor/github.com/klauspost/cpuid/v2/README.md
@@ -9,10 +9,7 @@ You can access the CPU information by accessing the shared CPU variable of the c
 Package home: https://github.com/klauspost/cpuid
 
 [![PkgGoDev](https://pkg.go.dev/badge/github.com/klauspost/cpuid)](https://pkg.go.dev/github.com/klauspost/cpuid/v2)
-[![Build Status][3]][4]
-
-[3]: https://travis-ci.org/klauspost/cpuid.svg?branch=master
-[4]: https://travis-ci.org/klauspost/cpuid
+[![Go](https://github.com/klauspost/cpuid/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/cpuid/actions/workflows/go.yml)
 
 ## installing
 
@@ -285,7 +282,12 @@ Exit Code 1
 | AMXINT8            | Tile computational operations on 8-bit integers                                                                                                                                    |
 | AMXFP16            | Tile computational operations on FP16 numbers                                                                                                                                      |
 | AMXTILE            | Tile architecture                                                                                                                                                                  |
+| APX_F              | Intel APX                                                                                                                                                                          |
 | AVX                | AVX functions                                                                                                                                                                      |
+| AVX10              | If set the Intel AVX10 Converged Vector ISA is supported                                                                                                                           |
+| AVX10_128          | If set indicates that AVX10 128-bit vector support is present                                                                                                                      |
+| AVX10_256          | If set indicates that AVX10 256-bit vector support is present                                                                                                                      |
+| AVX10_512          | If set indicates that AVX10 512-bit vector support is present                                                                                                                      |
 | AVX2               | AVX2 functions                                                                                                                                                                     |
 | AVX512BF16         | AVX-512 BFLOAT16 Instructions                                                                                                                                                      |
 | AVX512BITALG       | AVX-512 Bit Algorithms                                                                                                                                                             |
@@ -365,6 +367,8 @@ Exit Code 1
 | IDPRED_CTRL        | IPRED_DIS                                                                                                                                                                          |
 | INT_WBINVD         | WBINVD/WBNOINVD are interruptible.                                                                                                                                                 |
 | INVLPGB            | NVLPGB and TLBSYNC instruction supported                                                                                                                                           |
+| KEYLOCKER          | Key locker                                                                                                                                                                         |
+| KEYLOCKERW         | Key locker wide                                                                                                                                                                    |
 | LAHF               | LAHF/SAHF in long mode                                                                                                                                                             |
 | LAM                | If set, CPU supports Linear Address Masking                                                                                                                                        |
 | LBRVIRT            | LBR virtualization                                                                                                                                                                 |
@@ -380,7 +384,7 @@ Exit Code 1
 | MOVDIRI            | Move Doubleword as Direct Store                                                                                                                                                    |
 | MOVSB_ZL           | Fast Zero-Length MOVSB                                                                                                                                                             |
 | MPX                | Intel MPX (Memory Protection Extensions)                                                                                                                                           |
-| MOVU               | MOVU SSE instructions are more efficient and should be preferred to SSE	MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD       |
+| MOVU               | MOVU SSE instructions are more efficient and should be preferred to SSE	MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD        |
 | MSRIRC             | Instruction Retired Counter MSR available                                                                                                                                          |
 | MSRLIST            | Read/Write List of Model Specific Registers                                                                                                                                        |
 | MSR_PAGEFLUSH      | Page Flush MSR available                                                                                                                                                           |
diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go
index d015c744e8a10fa054bd4f075465df32cf8a8d95..15b760337aead8f9afbeb04cf56c4728d5915a59 100644
--- a/vendor/github.com/klauspost/cpuid/v2/cpuid.go
+++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go
@@ -76,7 +76,12 @@ const (
 	AMXFP16                             // Tile computational operations on FP16 numbers
 	AMXINT8                             // Tile computational operations on 8-bit integers
 	AMXTILE                             // Tile architecture
+	APX_F                               // Intel APX
 	AVX                                 // AVX functions
+	AVX10                               // If set the Intel AVX10 Converged Vector ISA is supported
+	AVX10_128                           // If set indicates that AVX10 128-bit vector support is present
+	AVX10_256                           // If set indicates that AVX10 256-bit vector support is present
+	AVX10_512                           // If set indicates that AVX10 512-bit vector support is present
 	AVX2                                // AVX2 functions
 	AVX512BF16                          // AVX-512 BFLOAT16 Instructions
 	AVX512BITALG                        // AVX-512 Bit Algorithms
@@ -156,6 +161,8 @@ const (
 	IDPRED_CTRL                         // IPRED_DIS
 	INT_WBINVD                          // WBINVD/WBNOINVD are interruptible.
 	INVLPGB                             // NVLPGB and TLBSYNC instruction supported
+	KEYLOCKER                           // Key locker
+	KEYLOCKERW                          // Key locker wide
 	LAHF                                // LAHF/SAHF in long mode
 	LAM                                 // If set, CPU supports Linear Address Masking
 	LBRVIRT                             // LBR virtualization
@@ -302,9 +309,10 @@ type CPUInfo struct {
 		L2  int // L2 Cache (per core or shared). Will be -1 if undetected
 		L3  int // L3 Cache (per core, per ccx or shared). Will be -1 if undetected
 	}
-	SGX       SGXSupport
-	maxFunc   uint32
-	maxExFunc uint32
+	SGX        SGXSupport
+	AVX10Level uint8
+	maxFunc    uint32
+	maxExFunc  uint32
 }
 
 var cpuid func(op uint32) (eax, ebx, ecx, edx uint32)
@@ -1165,6 +1173,7 @@ func support() flagSet {
 		fs.setIf(ecx&(1<<10) != 0, VPCLMULQDQ)
 		fs.setIf(ecx&(1<<13) != 0, TME)
 		fs.setIf(ecx&(1<<25) != 0, CLDEMOTE)
+		fs.setIf(ecx&(1<<23) != 0, KEYLOCKER)
 		fs.setIf(ecx&(1<<27) != 0, MOVDIRI)
 		fs.setIf(ecx&(1<<28) != 0, MOVDIR64B)
 		fs.setIf(ecx&(1<<29) != 0, ENQCMD)
@@ -1202,6 +1211,8 @@ func support() flagSet {
 		fs.setIf(edx1&(1<<4) != 0, AVXVNNIINT8)
 		fs.setIf(edx1&(1<<5) != 0, AVXNECONVERT)
 		fs.setIf(edx1&(1<<14) != 0, PREFETCHI)
+		fs.setIf(edx1&(1<<19) != 0, AVX10)
+		fs.setIf(edx1&(1<<21) != 0, APX_F)
 
 		// Only detect AVX-512 features if XGETBV is supported
 		if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) {
@@ -1252,6 +1263,19 @@ func support() flagSet {
 		fs.setIf(edx&(1<<4) != 0, BHI_CTRL)
 		fs.setIf(edx&(1<<5) != 0, MCDT_NO)
 
+		// Add keylocker features.
+		if fs.inSet(KEYLOCKER) && mfi >= 0x19 {
+			_, ebx, _, _ := cpuidex(0x19, 0)
+			fs.setIf(ebx&5 == 5, KEYLOCKERW) // Bit 0 and 2 (1+4)
+		}
+
+		// Add AVX10 features.
+		if fs.inSet(AVX10) && mfi >= 0x24 {
+			_, ebx, _, _ := cpuidex(0x24, 0)
+			fs.setIf(ebx&(1<<16) != 0, AVX10_128)
+			fs.setIf(ebx&(1<<17) != 0, AVX10_256)
+			fs.setIf(ebx&(1<<18) != 0, AVX10_512)
+		}
 	}
 
 	// Processor Extended State Enumeration Sub-leaf (EAX = 0DH, ECX = 1)
@@ -1394,6 +1418,20 @@ func support() flagSet {
 		fs.setIf((a>>24)&1 == 1, VMSA_REGPROT)
 	}
 
+	if mfi >= 0x20 {
+		// Microsoft has decided to purposefully hide the information
+		// of the guest TEE when VMs are being created using Hyper-V.
+		//
+		// This leads us to check for the Hyper-V cpuid features
+		// (0x4000000C), and then for the `ebx` value set.
+		//
+	// For Intel TDX, `ebx` is set to `0xbe3`; the low bits (3) are
+	// the part we're most interested in, according to:
+		// https://github.com/torvalds/linux/blob/d2f51b3516dade79269ff45eae2a7668ae711b25/arch/x86/include/asm/hyperv-tlfs.h#L169-L174
+		_, ebx, _, _ := cpuid(0x4000000C)
+		fs.setIf(ebx == 0xbe3, TDX_GUEST)
+	}
+
 	if mfi >= 0x21 {
 		// Intel Trusted Domain Extensions Guests have their own cpuid leaf (0x21).
 		_, ebx, ecx, edx := cpuid(0x21)
@@ -1404,6 +1442,14 @@ func support() flagSet {
 	return fs
 }
 
+func (c *CPUInfo) supportAVX10() uint8 {
+	if c.maxFunc >= 0x24 && c.featureSet.inSet(AVX10) {
+		_, ebx, _, _ := cpuidex(0x24, 0)
+		return uint8(ebx)
+	}
+	return 0
+}
+
 func valAsString(values ...uint32) []byte {
 	r := make([]byte, 4*len(values))
 	for i, v := range values {
diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go
index c946824ec0aeaa86add1cbf2569d00a8e62f8fc0..c7dfa125de49c3280060c20754f8967e34dd0aa1 100644
--- a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go
+++ b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go
@@ -31,6 +31,7 @@ func addInfo(c *CPUInfo, safe bool) {
 	c.LogicalCores = logicalCores()
 	c.PhysicalCores = physicalCores()
 	c.VendorID, c.VendorString = vendorID()
+	c.AVX10Level = c.supportAVX10()
 	c.cacheSize()
 	c.frequencies()
 }
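With detection wired in via `addInfo`, consumers can read the new field and flags straight off `cpuid.CPU`. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

func main() {
	// AVX10Level is populated during detection alongside the new
	// AVX10/AVX10_128/AVX10_256/AVX10_512 feature flags.
	if cpuid.CPU.Supports(cpuid.AVX10) {
		fmt.Println("AVX10 version:", cpuid.CPU.AVX10Level)
		fmt.Println("512-bit vectors:", cpuid.CPU.Supports(cpuid.AVX10_512))
	} else {
		fmt.Println("AVX10 not supported on this CPU")
	}
}
```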
diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
index 024c706af59b4b79c1d2ecbd2cf00d94dc1dccd5..43bd05f51685a2954fbda56da7c268ea166f7faf 100644
--- a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
+++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
@@ -16,210 +16,217 @@ func _() {
 	_ = x[AMXFP16-6]
 	_ = x[AMXINT8-7]
 	_ = x[AMXTILE-8]
-	_ = x[AVX-9]
-	_ = x[AVX2-10]
-	_ = x[AVX512BF16-11]
-	_ = x[AVX512BITALG-12]
-	_ = x[AVX512BW-13]
-	_ = x[AVX512CD-14]
-	_ = x[AVX512DQ-15]
-	_ = x[AVX512ER-16]
-	_ = x[AVX512F-17]
-	_ = x[AVX512FP16-18]
-	_ = x[AVX512IFMA-19]
-	_ = x[AVX512PF-20]
-	_ = x[AVX512VBMI-21]
-	_ = x[AVX512VBMI2-22]
-	_ = x[AVX512VL-23]
-	_ = x[AVX512VNNI-24]
-	_ = x[AVX512VP2INTERSECT-25]
-	_ = x[AVX512VPOPCNTDQ-26]
-	_ = x[AVXIFMA-27]
-	_ = x[AVXNECONVERT-28]
-	_ = x[AVXSLOW-29]
-	_ = x[AVXVNNI-30]
-	_ = x[AVXVNNIINT8-31]
-	_ = x[BHI_CTRL-32]
-	_ = x[BMI1-33]
-	_ = x[BMI2-34]
-	_ = x[CETIBT-35]
-	_ = x[CETSS-36]
-	_ = x[CLDEMOTE-37]
-	_ = x[CLMUL-38]
-	_ = x[CLZERO-39]
-	_ = x[CMOV-40]
-	_ = x[CMPCCXADD-41]
-	_ = x[CMPSB_SCADBS_SHORT-42]
-	_ = x[CMPXCHG8-43]
-	_ = x[CPBOOST-44]
-	_ = x[CPPC-45]
-	_ = x[CX16-46]
-	_ = x[EFER_LMSLE_UNS-47]
-	_ = x[ENQCMD-48]
-	_ = x[ERMS-49]
-	_ = x[F16C-50]
-	_ = x[FLUSH_L1D-51]
-	_ = x[FMA3-52]
-	_ = x[FMA4-53]
-	_ = x[FP128-54]
-	_ = x[FP256-55]
-	_ = x[FSRM-56]
-	_ = x[FXSR-57]
-	_ = x[FXSROPT-58]
-	_ = x[GFNI-59]
-	_ = x[HLE-60]
-	_ = x[HRESET-61]
-	_ = x[HTT-62]
-	_ = x[HWA-63]
-	_ = x[HYBRID_CPU-64]
-	_ = x[HYPERVISOR-65]
-	_ = x[IA32_ARCH_CAP-66]
-	_ = x[IA32_CORE_CAP-67]
-	_ = x[IBPB-68]
-	_ = x[IBRS-69]
-	_ = x[IBRS_PREFERRED-70]
-	_ = x[IBRS_PROVIDES_SMP-71]
-	_ = x[IBS-72]
-	_ = x[IBSBRNTRGT-73]
-	_ = x[IBSFETCHSAM-74]
-	_ = x[IBSFFV-75]
-	_ = x[IBSOPCNT-76]
-	_ = x[IBSOPCNTEXT-77]
-	_ = x[IBSOPSAM-78]
-	_ = x[IBSRDWROPCNT-79]
-	_ = x[IBSRIPINVALIDCHK-80]
-	_ = x[IBS_FETCH_CTLX-81]
-	_ = x[IBS_OPDATA4-82]
-	_ = x[IBS_OPFUSE-83]
-	_ = x[IBS_PREVENTHOST-84]
-	_ = x[IBS_ZEN4-85]
-	_ = x[IDPRED_CTRL-86]
-	_ = x[INT_WBINVD-87]
-	_ = x[INVLPGB-88]
-	_ = x[LAHF-89]
-	_ = x[LAM-90]
-	_ = x[LBRVIRT-91]
-	_ = x[LZCNT-92]
-	_ = x[MCAOVERFLOW-93]
-	_ = x[MCDT_NO-94]
-	_ = x[MCOMMIT-95]
-	_ = x[MD_CLEAR-96]
-	_ = x[MMX-97]
-	_ = x[MMXEXT-98]
-	_ = x[MOVBE-99]
-	_ = x[MOVDIR64B-100]
-	_ = x[MOVDIRI-101]
-	_ = x[MOVSB_ZL-102]
-	_ = x[MOVU-103]
-	_ = x[MPX-104]
-	_ = x[MSRIRC-105]
-	_ = x[MSRLIST-106]
-	_ = x[MSR_PAGEFLUSH-107]
-	_ = x[NRIPS-108]
-	_ = x[NX-109]
-	_ = x[OSXSAVE-110]
-	_ = x[PCONFIG-111]
-	_ = x[POPCNT-112]
-	_ = x[PPIN-113]
-	_ = x[PREFETCHI-114]
-	_ = x[PSFD-115]
-	_ = x[RDPRU-116]
-	_ = x[RDRAND-117]
-	_ = x[RDSEED-118]
-	_ = x[RDTSCP-119]
-	_ = x[RRSBA_CTRL-120]
-	_ = x[RTM-121]
-	_ = x[RTM_ALWAYS_ABORT-122]
-	_ = x[SERIALIZE-123]
-	_ = x[SEV-124]
-	_ = x[SEV_64BIT-125]
-	_ = x[SEV_ALTERNATIVE-126]
-	_ = x[SEV_DEBUGSWAP-127]
-	_ = x[SEV_ES-128]
-	_ = x[SEV_RESTRICTED-129]
-	_ = x[SEV_SNP-130]
-	_ = x[SGX-131]
-	_ = x[SGXLC-132]
-	_ = x[SHA-133]
-	_ = x[SME-134]
-	_ = x[SME_COHERENT-135]
-	_ = x[SPEC_CTRL_SSBD-136]
-	_ = x[SRBDS_CTRL-137]
-	_ = x[SSE-138]
-	_ = x[SSE2-139]
-	_ = x[SSE3-140]
-	_ = x[SSE4-141]
-	_ = x[SSE42-142]
-	_ = x[SSE4A-143]
-	_ = x[SSSE3-144]
-	_ = x[STIBP-145]
-	_ = x[STIBP_ALWAYSON-146]
-	_ = x[STOSB_SHORT-147]
-	_ = x[SUCCOR-148]
-	_ = x[SVM-149]
-	_ = x[SVMDA-150]
-	_ = x[SVMFBASID-151]
-	_ = x[SVML-152]
-	_ = x[SVMNP-153]
-	_ = x[SVMPF-154]
-	_ = x[SVMPFT-155]
-	_ = x[SYSCALL-156]
-	_ = x[SYSEE-157]
-	_ = x[TBM-158]
-	_ = x[TDX_GUEST-159]
-	_ = x[TLB_FLUSH_NESTED-160]
-	_ = x[TME-161]
-	_ = x[TOPEXT-162]
-	_ = x[TSCRATEMSR-163]
-	_ = x[TSXLDTRK-164]
-	_ = x[VAES-165]
-	_ = x[VMCBCLEAN-166]
-	_ = x[VMPL-167]
-	_ = x[VMSA_REGPROT-168]
-	_ = x[VMX-169]
-	_ = x[VPCLMULQDQ-170]
-	_ = x[VTE-171]
-	_ = x[WAITPKG-172]
-	_ = x[WBNOINVD-173]
-	_ = x[WRMSRNS-174]
-	_ = x[X87-175]
-	_ = x[XGETBV1-176]
-	_ = x[XOP-177]
-	_ = x[XSAVE-178]
-	_ = x[XSAVEC-179]
-	_ = x[XSAVEOPT-180]
-	_ = x[XSAVES-181]
-	_ = x[AESARM-182]
-	_ = x[ARMCPUID-183]
-	_ = x[ASIMD-184]
-	_ = x[ASIMDDP-185]
-	_ = x[ASIMDHP-186]
-	_ = x[ASIMDRDM-187]
-	_ = x[ATOMICS-188]
-	_ = x[CRC32-189]
-	_ = x[DCPOP-190]
-	_ = x[EVTSTRM-191]
-	_ = x[FCMA-192]
-	_ = x[FP-193]
-	_ = x[FPHP-194]
-	_ = x[GPA-195]
-	_ = x[JSCVT-196]
-	_ = x[LRCPC-197]
-	_ = x[PMULL-198]
-	_ = x[SHA1-199]
-	_ = x[SHA2-200]
-	_ = x[SHA3-201]
-	_ = x[SHA512-202]
-	_ = x[SM3-203]
-	_ = x[SM4-204]
-	_ = x[SVE-205]
-	_ = x[lastID-206]
+	_ = x[APX_F-9]
+	_ = x[AVX-10]
+	_ = x[AVX10-11]
+	_ = x[AVX10_128-12]
+	_ = x[AVX10_256-13]
+	_ = x[AVX10_512-14]
+	_ = x[AVX2-15]
+	_ = x[AVX512BF16-16]
+	_ = x[AVX512BITALG-17]
+	_ = x[AVX512BW-18]
+	_ = x[AVX512CD-19]
+	_ = x[AVX512DQ-20]
+	_ = x[AVX512ER-21]
+	_ = x[AVX512F-22]
+	_ = x[AVX512FP16-23]
+	_ = x[AVX512IFMA-24]
+	_ = x[AVX512PF-25]
+	_ = x[AVX512VBMI-26]
+	_ = x[AVX512VBMI2-27]
+	_ = x[AVX512VL-28]
+	_ = x[AVX512VNNI-29]
+	_ = x[AVX512VP2INTERSECT-30]
+	_ = x[AVX512VPOPCNTDQ-31]
+	_ = x[AVXIFMA-32]
+	_ = x[AVXNECONVERT-33]
+	_ = x[AVXSLOW-34]
+	_ = x[AVXVNNI-35]
+	_ = x[AVXVNNIINT8-36]
+	_ = x[BHI_CTRL-37]
+	_ = x[BMI1-38]
+	_ = x[BMI2-39]
+	_ = x[CETIBT-40]
+	_ = x[CETSS-41]
+	_ = x[CLDEMOTE-42]
+	_ = x[CLMUL-43]
+	_ = x[CLZERO-44]
+	_ = x[CMOV-45]
+	_ = x[CMPCCXADD-46]
+	_ = x[CMPSB_SCADBS_SHORT-47]
+	_ = x[CMPXCHG8-48]
+	_ = x[CPBOOST-49]
+	_ = x[CPPC-50]
+	_ = x[CX16-51]
+	_ = x[EFER_LMSLE_UNS-52]
+	_ = x[ENQCMD-53]
+	_ = x[ERMS-54]
+	_ = x[F16C-55]
+	_ = x[FLUSH_L1D-56]
+	_ = x[FMA3-57]
+	_ = x[FMA4-58]
+	_ = x[FP128-59]
+	_ = x[FP256-60]
+	_ = x[FSRM-61]
+	_ = x[FXSR-62]
+	_ = x[FXSROPT-63]
+	_ = x[GFNI-64]
+	_ = x[HLE-65]
+	_ = x[HRESET-66]
+	_ = x[HTT-67]
+	_ = x[HWA-68]
+	_ = x[HYBRID_CPU-69]
+	_ = x[HYPERVISOR-70]
+	_ = x[IA32_ARCH_CAP-71]
+	_ = x[IA32_CORE_CAP-72]
+	_ = x[IBPB-73]
+	_ = x[IBRS-74]
+	_ = x[IBRS_PREFERRED-75]
+	_ = x[IBRS_PROVIDES_SMP-76]
+	_ = x[IBS-77]
+	_ = x[IBSBRNTRGT-78]
+	_ = x[IBSFETCHSAM-79]
+	_ = x[IBSFFV-80]
+	_ = x[IBSOPCNT-81]
+	_ = x[IBSOPCNTEXT-82]
+	_ = x[IBSOPSAM-83]
+	_ = x[IBSRDWROPCNT-84]
+	_ = x[IBSRIPINVALIDCHK-85]
+	_ = x[IBS_FETCH_CTLX-86]
+	_ = x[IBS_OPDATA4-87]
+	_ = x[IBS_OPFUSE-88]
+	_ = x[IBS_PREVENTHOST-89]
+	_ = x[IBS_ZEN4-90]
+	_ = x[IDPRED_CTRL-91]
+	_ = x[INT_WBINVD-92]
+	_ = x[INVLPGB-93]
+	_ = x[KEYLOCKER-94]
+	_ = x[KEYLOCKERW-95]
+	_ = x[LAHF-96]
+	_ = x[LAM-97]
+	_ = x[LBRVIRT-98]
+	_ = x[LZCNT-99]
+	_ = x[MCAOVERFLOW-100]
+	_ = x[MCDT_NO-101]
+	_ = x[MCOMMIT-102]
+	_ = x[MD_CLEAR-103]
+	_ = x[MMX-104]
+	_ = x[MMXEXT-105]
+	_ = x[MOVBE-106]
+	_ = x[MOVDIR64B-107]
+	_ = x[MOVDIRI-108]
+	_ = x[MOVSB_ZL-109]
+	_ = x[MOVU-110]
+	_ = x[MPX-111]
+	_ = x[MSRIRC-112]
+	_ = x[MSRLIST-113]
+	_ = x[MSR_PAGEFLUSH-114]
+	_ = x[NRIPS-115]
+	_ = x[NX-116]
+	_ = x[OSXSAVE-117]
+	_ = x[PCONFIG-118]
+	_ = x[POPCNT-119]
+	_ = x[PPIN-120]
+	_ = x[PREFETCHI-121]
+	_ = x[PSFD-122]
+	_ = x[RDPRU-123]
+	_ = x[RDRAND-124]
+	_ = x[RDSEED-125]
+	_ = x[RDTSCP-126]
+	_ = x[RRSBA_CTRL-127]
+	_ = x[RTM-128]
+	_ = x[RTM_ALWAYS_ABORT-129]
+	_ = x[SERIALIZE-130]
+	_ = x[SEV-131]
+	_ = x[SEV_64BIT-132]
+	_ = x[SEV_ALTERNATIVE-133]
+	_ = x[SEV_DEBUGSWAP-134]
+	_ = x[SEV_ES-135]
+	_ = x[SEV_RESTRICTED-136]
+	_ = x[SEV_SNP-137]
+	_ = x[SGX-138]
+	_ = x[SGXLC-139]
+	_ = x[SHA-140]
+	_ = x[SME-141]
+	_ = x[SME_COHERENT-142]
+	_ = x[SPEC_CTRL_SSBD-143]
+	_ = x[SRBDS_CTRL-144]
+	_ = x[SSE-145]
+	_ = x[SSE2-146]
+	_ = x[SSE3-147]
+	_ = x[SSE4-148]
+	_ = x[SSE42-149]
+	_ = x[SSE4A-150]
+	_ = x[SSSE3-151]
+	_ = x[STIBP-152]
+	_ = x[STIBP_ALWAYSON-153]
+	_ = x[STOSB_SHORT-154]
+	_ = x[SUCCOR-155]
+	_ = x[SVM-156]
+	_ = x[SVMDA-157]
+	_ = x[SVMFBASID-158]
+	_ = x[SVML-159]
+	_ = x[SVMNP-160]
+	_ = x[SVMPF-161]
+	_ = x[SVMPFT-162]
+	_ = x[SYSCALL-163]
+	_ = x[SYSEE-164]
+	_ = x[TBM-165]
+	_ = x[TDX_GUEST-166]
+	_ = x[TLB_FLUSH_NESTED-167]
+	_ = x[TME-168]
+	_ = x[TOPEXT-169]
+	_ = x[TSCRATEMSR-170]
+	_ = x[TSXLDTRK-171]
+	_ = x[VAES-172]
+	_ = x[VMCBCLEAN-173]
+	_ = x[VMPL-174]
+	_ = x[VMSA_REGPROT-175]
+	_ = x[VMX-176]
+	_ = x[VPCLMULQDQ-177]
+	_ = x[VTE-178]
+	_ = x[WAITPKG-179]
+	_ = x[WBNOINVD-180]
+	_ = x[WRMSRNS-181]
+	_ = x[X87-182]
+	_ = x[XGETBV1-183]
+	_ = x[XOP-184]
+	_ = x[XSAVE-185]
+	_ = x[XSAVEC-186]
+	_ = x[XSAVEOPT-187]
+	_ = x[XSAVES-188]
+	_ = x[AESARM-189]
+	_ = x[ARMCPUID-190]
+	_ = x[ASIMD-191]
+	_ = x[ASIMDDP-192]
+	_ = x[ASIMDHP-193]
+	_ = x[ASIMDRDM-194]
+	_ = x[ATOMICS-195]
+	_ = x[CRC32-196]
+	_ = x[DCPOP-197]
+	_ = x[EVTSTRM-198]
+	_ = x[FCMA-199]
+	_ = x[FP-200]
+	_ = x[FPHP-201]
+	_ = x[GPA-202]
+	_ = x[JSCVT-203]
+	_ = x[LRCPC-204]
+	_ = x[PMULL-205]
+	_ = x[SHA1-206]
+	_ = x[SHA2-207]
+	_ = x[SHA3-208]
+	_ = x[SHA512-209]
+	_ = x[SM3-210]
+	_ = x[SM4-211]
+	_ = x[SVE-212]
+	_ = x[lastID-213]
 	_ = x[firstID-0]
 }
 
-const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
+const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
 
-var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 65, 69, 79, 91, 99, 107, 115, 123, 130, 140, 150, 158, 168, 179, 187, 197, 215, 230, 237, 249, 256, 263, 274, 282, 286, 290, 296, 301, 309, 314, 320, 324, 333, 351, 359, 366, 370, 374, 388, 394, 398, 402, 411, 415, 419, 424, 429, 433, 437, 444, 448, 451, 457, 460, 463, 473, 483, 496, 509, 513, 517, 531, 548, 551, 561, 572, 578, 586, 597, 605, 617, 633, 647, 658, 668, 683, 691, 702, 712, 719, 723, 726, 733, 738, 749, 756, 763, 771, 774, 780, 785, 794, 801, 809, 813, 816, 822, 829, 842, 847, 849, 856, 863, 869, 873, 882, 886, 891, 897, 903, 909, 919, 922, 938, 947, 950, 959, 974, 987, 993, 1007, 1014, 1017, 1022, 1025, 1028, 1040, 1054, 1064, 1067, 1071, 1075, 1079, 1084, 1089, 1094, 1099, 1113, 1124, 1130, 1133, 1138, 1147, 1151, 1156, 1161, 1167, 1174, 1179, 1182, 1191, 1207, 1210, 1216, 1226, 1234, 1238, 1247, 1251, 1263, 1266, 1276, 1279, 1286, 1294, 1301, 1304, 1311, 1314, 1319, 1325, 1333, 1339, 1345, 1353, 1358, 1365, 1372, 1380, 1387, 1392, 1397, 1404, 1408, 1410, 1414, 1417, 1422, 1427, 1432, 1436, 1440, 1444, 1450, 1453, 1456, 1459, 1465}
+var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 67, 70, 75, 84, 93, 102, 106, 116, 128, 136, 144, 152, 160, 167, 177, 187, 195, 205, 216, 224, 234, 252, 267, 274, 286, 293, 300, 311, 319, 323, 327, 333, 338, 346, 351, 357, 361, 370, 388, 396, 403, 407, 411, 425, 431, 435, 439, 448, 452, 456, 461, 466, 470, 474, 481, 485, 488, 494, 497, 500, 510, 520, 533, 546, 550, 554, 568, 585, 588, 598, 609, 615, 623, 634, 642, 654, 670, 684, 695, 705, 720, 728, 739, 749, 756, 765, 775, 779, 782, 789, 794, 805, 812, 819, 827, 830, 836, 841, 850, 857, 865, 869, 872, 878, 885, 898, 903, 905, 912, 919, 925, 929, 938, 942, 947, 953, 959, 965, 975, 978, 994, 1003, 1006, 1015, 1030, 1043, 1049, 1063, 1070, 1073, 1078, 1081, 1084, 1096, 1110, 1120, 1123, 1127, 1131, 1135, 1140, 1145, 1150, 1155, 1169, 1180, 1186, 1189, 1194, 1203, 1207, 1212, 1217, 1223, 1230, 1235, 1238, 1247, 1263, 1266, 1272, 1282, 1290, 1294, 1303, 1307, 1319, 1322, 1332, 1335, 1342, 1350, 1357, 1360, 1367, 1370, 1375, 1381, 1389, 1395, 1401, 1409, 1414, 1421, 1428, 1436, 1443, 1448, 1453, 1460, 1464, 1466, 1470, 1473, 1478, 1483, 1488, 1492, 1496, 1500, 1506, 1509, 1512, 1515, 1521}
 
 func (i FeatureID) String() string {
 	if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) {
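The regenerated file above follows the usual stringer layout (golang.org/x/tools/cmd/stringer): every feature name is concatenated into one backing string, and the uint16 index table records where each name starts, so String() reduces to a bounds check plus a two-offset slice. A tiny self-contained sketch of the same scheme, with made-up names and offsets for illustration:

```go
package main

import "fmt"

// Same layout as the generated _FeatureID_name/_FeatureID_index pair:
// one backing string plus a table of start offsets into it.
const names = "firstIDADXAESNIAVX10lastID"

var index = [...]uint16{0, 7, 10, 15, 20, 26}

func name(i int) string {
	if i < 0 || i >= len(index)-1 {
		return fmt.Sprintf("FeatureID(%d)", i)
	}
	return names[index[i]:index[i+1]]
}

func main() {
	fmt.Println(name(3)) // AVX10 (names[15:20])
}
```

Because inserting KEYLOCKER, AVX10, and the other new IDs shifts every subsequent name's offset, both the backing string and the entire index table change, which is why the two long generated lines above are rewritten wholesale.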
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
deleted file mode 100644
index 5d8cb5b72e730c77fe7a890cbb269282caaae34e..0000000000000000000000000000000000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
+++ /dev/null
@@ -1 +0,0 @@
-Copyright 2012 Matt T. Proud (matt.proud@gmail.com)
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore
deleted file mode 100644
index e16fb946bb9bb3a2d5ad2442ffa1469d28ef066c..0000000000000000000000000000000000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-cover.dat
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
deleted file mode 100644
index 81be214370d433d0aef7881d46c0264b5880fa60..0000000000000000000000000000000000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-all:
-
-cover:
-	go test -cover -v -coverprofile=cover.dat ./...
-	go tool cover -func cover.dat
-
-.PHONY: cover
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
deleted file mode 100644
index 258c0636aac8c50be39cca1364c2357cbda33327..0000000000000000000000000000000000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2013 Matt T. Proud
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pbutil
-
-import (
-	"encoding/binary"
-	"errors"
-	"io"
-
-	"github.com/golang/protobuf/proto"
-)
-
-var errInvalidVarint = errors.New("invalid varint32 encountered")
-
-// ReadDelimited decodes a message from the provided length-delimited stream,
-// where the length is encoded as 32-bit varint prefix to the message body.
-// It returns the total number of bytes read and any applicable error.  This is
-// roughly equivalent to the companion Java API's
-// MessageLite#parseDelimitedFrom.  As per the reader contract, this function
-// calls r.Read repeatedly as required until exactly one message including its
-// prefix is read and decoded (or an error has occurred).  The function never
-// reads more bytes from the stream than required.  The function never returns
-// an error if a message has been read and decoded correctly, even if the end
-// of the stream has been reached in doing so.  In that case, any subsequent
-// calls return (0, io.EOF).
-func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
-	// Per AbstractParser#parsePartialDelimitedFrom with
-	// CodedInputStream#readRawVarint32.
-	var headerBuf [binary.MaxVarintLen32]byte
-	var bytesRead, varIntBytes int
-	var messageLength uint64
-	for varIntBytes == 0 { // i.e. no varint has been decoded yet.
-		if bytesRead >= len(headerBuf) {
-			return bytesRead, errInvalidVarint
-		}
-		// We have to read byte by byte here to avoid reading more bytes
-		// than required. Each read byte is appended to what we have
-		// read before.
-		newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
-		if newBytesRead == 0 {
-			if err != nil {
-				return bytesRead, err
-			}
-			// A Reader should not return (0, nil), but if it does,
-			// it should be treated as no-op (according to the
-			// Reader contract). So let's go on...
-			continue
-		}
-		bytesRead += newBytesRead
-		// Now present everything read so far to the varint decoder and
-		// see if a varint can be decoded already.
-		messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
-	}
-
-	messageBuf := make([]byte, messageLength)
-	newBytesRead, err := io.ReadFull(r, messageBuf)
-	bytesRead += newBytesRead
-	if err != nil {
-		return bytesRead, err
-	}
-
-	return bytesRead, proto.Unmarshal(messageBuf, m)
-}
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
deleted file mode 100644
index 8fb59ad226ff0962630d2539bd4d3ae9a53ea58c..0000000000000000000000000000000000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2013 Matt T. Proud
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pbutil
-
-import (
-	"encoding/binary"
-	"io"
-
-	"github.com/golang/protobuf/proto"
-)
-
-// WriteDelimited encodes and dumps a message to the provided writer prefixed
-// with a 32-bit varint indicating the length of the encoded message, producing
-// a length-delimited record stream, which can be used to chain together
-// encoded messages of the same type together in a file.  It returns the total
-// number of bytes written and any applicable error.  This is roughly
-// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
-func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
-	buffer, err := proto.Marshal(m)
-	if err != nil {
-		return 0, err
-	}
-
-	var buf [binary.MaxVarintLen32]byte
-	encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))
-
-	sync, err := w.Write(buf[:encodedLength])
-	if err != nil {
-		return sync, err
-	}
-
-	n, err = w.Write(buffer)
-	return n + sync, err
-}
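The deletions above remove the pbutil varint-framing helpers from the vendor tree entirely. If downstream code still needs the same 32-bit-varint length-delimited framing that ReadDelimited/WriteDelimited provided, the first-party google.golang.org/protobuf/encoding/protodelim package offers equivalent helpers. A minimal sketch, with the caveat that protodelim is the commonly used successor package and not something this diff itself introduces:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"log"

	"google.golang.org/protobuf/encoding/protodelim"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	var buf bytes.Buffer

	// Varint length prefix followed by the message body, as
	// WriteDelimited produced.
	if _, err := protodelim.MarshalTo(&buf, timestamppb.Now()); err != nil {
		log.Fatal(err)
	}

	// UnmarshalFrom wants an io.ByteReader so it can consume the prefix
	// one byte at a time -- the same constraint the deleted ReadDelimited
	// satisfied by reading byte by byte.
	ts := &timestamppb.Timestamp{}
	if err := protodelim.UnmarshalFrom(bufio.NewReader(&buf), ts); err != nil {
		log.Fatal(err)
	}
	fmt.Println(ts.AsTime())
}
```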
diff --git a/vendor/github.com/minio/minio-go/v7/CREDITS b/vendor/github.com/minio/minio-go/v7/CREDITS
new file mode 100644
index 0000000000000000000000000000000000000000..d2092318141f3101b2e28ac9d18bde83ef493bed
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/CREDITS
@@ -0,0 +1,1806 @@
+Go (the standard library)
+https://golang.org/
+----------------------------------------------------------------
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+================================================================
+
+github.com/davecgh/go-spew
+https://github.com/davecgh/go-spew
+----------------------------------------------------------------
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+================================================================
+
+github.com/dustin/go-humanize
+https://github.com/dustin/go-humanize
+----------------------------------------------------------------
+Copyright (c) 2005-2008  Dustin Sallings <dustin@spy.net>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+<http://www.opensource.org/licenses/mit-license.php>
+
+================================================================
+
+github.com/google/uuid
+https://github.com/google/uuid
+----------------------------------------------------------------
+Copyright (c) 2009,2014 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+================================================================
+
+github.com/json-iterator/go
+https://github.com/json-iterator/go
+----------------------------------------------------------------
+MIT License
+
+Copyright (c) 2016 json-iterator
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+================================================================
+
+github.com/klauspost/compress
+https://github.com/klauspost/compress
+----------------------------------------------------------------
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2019 Klaus Post. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+------------------
+
+Files: gzhttp/*
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2016-2017 The New York Times Company
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+------------------
+
+Files: s2/cmd/internal/readahead/*
+
+The MIT License (MIT)
+
+Copyright (c) 2015 Klaus Post
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+---------------------
+Files: snappy/*
+Files: internal/snapref/*
+
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-----------------
+
+Files: s2/cmd/internal/filepathx/*
+
+Copyright 2016 The filepathx Authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+================================================================
+
+github.com/klauspost/cpuid/v2
+https://github.com/klauspost/cpuid/v2
+----------------------------------------------------------------
+The MIT License (MIT)
+
+Copyright (c) 2015 Klaus Post
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+================================================================
+
+github.com/minio/md5-simd
+https://github.com/minio/md5-simd
+----------------------------------------------------------------
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+================================================================
+
+github.com/minio/sha256-simd
+https://github.com/minio/sha256-simd
+----------------------------------------------------------------
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+================================================================
+
+github.com/modern-go/concurrent
+https://github.com/modern-go/concurrent
+----------------------------------------------------------------
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+================================================================
+
+github.com/modern-go/reflect2
+https://github.com/modern-go/reflect2
+----------------------------------------------------------------
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+================================================================
+
+github.com/pmezard/go-difflib
+https://github.com/pmezard/go-difflib
+----------------------------------------------------------------
+Copyright (c) 2013, Patrick Mezard
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+    The names of its contributors may not be used to endorse or promote
+products derived from this software without specific prior written
+permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+================================================================
+
+github.com/rs/xid
+https://github.com/rs/xid
+----------------------------------------------------------------
+Copyright (c) 2015 Olivier Poitrey <rs@dailymotion.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is furnished
+to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+================================================================
+
+github.com/sirupsen/logrus
+https://github.com/sirupsen/logrus
+----------------------------------------------------------------
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+================================================================
+
+github.com/stretchr/testify
+https://github.com/stretchr/testify
+----------------------------------------------------------------
+MIT License
+
+Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+================================================================
+
+golang.org/x/crypto
+https://golang.org/x/crypto
+----------------------------------------------------------------
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+================================================================
+
+golang.org/x/net
+https://golang.org/x/net
+----------------------------------------------------------------
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+================================================================
+
+golang.org/x/sys
+https://golang.org/x/sys
+----------------------------------------------------------------
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+================================================================
+
+golang.org/x/text
+https://golang.org/x/text
+----------------------------------------------------------------
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+================================================================
+
+gopkg.in/ini.v1
+https://gopkg.in/ini.v1
+----------------------------------------------------------------
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright 2014 Unknwon
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+================================================================
+
+gopkg.in/yaml.v3
+https://gopkg.in/yaml.v3
+----------------------------------------------------------------
+
+This project is covered by two different licenses: MIT and Apache.
+
+#### MIT License ####
+
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original MIT license, with the additional
+copyright starting in 2011 when the project was ported over:
+
+    apic.go emitterc.go parserc.go readerc.go scannerc.go
+    writerc.go yamlh.go yamlprivateh.go
+
+Copyright (c) 2006-2010 Kirill Simonov
+Copyright (c) 2006-2011 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+#### Apache License ####
+
+All the remaining project files are covered by the Apache license:
+
+Copyright (c) 2011-2019 Canonical Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+================================================================
+
diff --git a/vendor/github.com/minio/minio-go/v7/README.md b/vendor/github.com/minio/minio-go/v7/README.md
index 9b6bbbec342cdf966333d74b184afb5de129ec45..82f70a131a1b54d973f31d26939d5c7ddd09fcbe 100644
--- a/vendor/github.com/minio/minio-go/v7/README.md
+++ b/vendor/github.com/minio/minio-go/v7/README.md
@@ -1,23 +1,28 @@
 # MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE)
 
-The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
+The MinIO Go Client SDK provides straightforward APIs to access any Amazon S3 compatible object storage.
 
-This quickstart guide will show you how to install the MinIO client SDK, connect to MinIO, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html).
+This Quickstart Guide covers how to install the MinIO client SDK, connect to MinIO, and create a sample file uploader.
+For a complete list of APIs and examples, see the [godoc documentation](https://pkg.go.dev/github.com/minio/minio-go/v7) or [Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html).
 
-This document assumes that you have a working [Go development environment](https://golang.org/doc/install).
+These examples presume a working [Go development environment](https://golang.org/doc/install) and the [MinIO `mc` command line tool](https://min.io/docs/minio/linux/reference/minio-mc.html).
 
 ## Download from Github
+
+From your project directory:
+
 ```sh
 go get github.com/minio/minio-go/v7
 ```
 
-## Initialize MinIO Client
-MinIO client requires the following four parameters specified to connect to an Amazon S3 compatible object storage.
+## Initialize a MinIO Client Object
+
+The MinIO client requires the following parameters to connect to an Amazon S3 compatible object storage service:
 
-| Parameter  | Description|
-| :---         |     :---     |
-| endpoint   | URL to object storage service.   |
-| _minio.Options_ | All the options such as credentials, custom transport etc. |
+| Parameter         | Description                                                |
+| ----------------- | ---------------------------------------------------------- |
+| `endpoint`        | URL to object storage service.                             |
+| `minio.Options`   | All the options such as credentials, custom transport, etc. |
 
 ```go
 package main
@@ -48,13 +53,25 @@ func main() {
 }
 ```
 
-## Quick Start Example - File Uploader
-This example program connects to an object storage server, creates a bucket and uploads a file to the bucket.
+## Example - File Uploader
 
-We will use the MinIO server running at [https://play.min.io](https://play.min.io) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public.
+This sample code connects to an object storage server, creates a bucket, and uploads a file to the bucket.
+It uses the MinIO `play` server, a public MinIO cluster located at [https://play.min.io](https://play.min.io).
+
+The `play` server runs the latest stable version of MinIO and may be used for testing and development.
+The access credentials shown in this example are open to the public and all data uploaded to `play` should be considered public and non-protected.
 
 ### FileUploader.go
+
+This example does the following:
+
+- Connects to the MinIO `play` server using the provided credentials.
+- Creates a bucket named `testbucket`.
+- Uploads a file named `testdata` from `/tmp`.
+- Verifies the file was created using `mc ls`.
+
 ```go
+// FileUploader.go MinIO example
 package main
 
 import (
@@ -81,8 +98,8 @@ func main() {
 		log.Fatalln(err)
 	}
 
-	// Make a new bucket called mymusic.
-	bucketName := "mymusic"
+	// Make a new bucket called testbucket.
+	bucketName := "testbucket"
 	location := "us-east-1"
 
 	err = minioClient.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: location})
@@ -98,12 +115,13 @@ func main() {
 		log.Printf("Successfully created %s\n", bucketName)
 	}
 
-	// Upload the zip file
-	objectName := "golden-oldies.zip"
-	filePath := "/tmp/golden-oldies.zip"
-	contentType := "application/zip"
+	// Upload the test file
+	// Change the value of filePath if the file is in another location
+	objectName := "testdata"
+	filePath := "/tmp/testdata"
+	contentType := "application/octet-stream"
 
-	// Upload the zip file with FPutObject
+	// Upload the test file with FPutObject
 	info, err := minioClient.FPutObject(ctx, bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType})
 	if err != nil {
 		log.Fatalln(err)
@@ -113,22 +131,51 @@ func main() {
 }
 ```
 
-### Run FileUploader
+**1. Create a test file containing data:**
+
+You can do this with `dd` on Linux or macOS systems:
+
+```sh
+dd if=/dev/urandom of=/tmp/testdata bs=2048 count=10
+```
+
+or `fsutil` on Windows (update `filePath` in FileUploader.go to point at the file you create):
+
+```sh
+fsutil file createnew "C:\Users\<username>\Desktop\sample.txt" 20480
+```
+
+**2. Run FileUploader with the following commands:**
+
 ```sh
-go run file-uploader.go
-2016/08/13 17:03:28 Successfully created mymusic
-2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
+go mod init example/FileUploader
+go get github.com/minio/minio-go/v7
+go get github.com/minio/minio-go/v7/pkg/credentials
+go run FileUploader.go
+```
+
+The output resembles the following:
 
-mc ls play/mymusic/
-[2016-05-27 16:02:16 PDT]  17MiB golden-oldies.zip
+```sh
+2023/11/01 14:27:55 Successfully created testbucket
+2023/11/01 14:27:55 Successfully uploaded testdata of size 20480
+```
+
+**3. Verify the uploaded file with `mc ls`:**
+
+```sh
+mc ls play/testbucket
+[2023-11-01 14:27:55 UTC]  20KiB STANDARD testdata
 ```
 
 ## API Reference
+
 The full API Reference is available here.
 
 * [Complete API Reference](https://min.io/docs/minio/linux/developers/go/API.html)
 
 ### API Reference : Bucket Operations
+
 * [`MakeBucket`](https://min.io/docs/minio/linux/developers/go/API.html#MakeBucket)
 * [`ListBuckets`](https://min.io/docs/minio/linux/developers/go/API.html#ListBuckets)
 * [`BucketExists`](https://min.io/docs/minio/linux/developers/go/API.html#BucketExists)
@@ -137,10 +184,12 @@ The full API Reference is available here.
 * [`ListIncompleteUploads`](https://min.io/docs/minio/linux/developers/go/API.html#ListIncompleteUploads)
 
 ### API Reference : Bucket policy Operations
+
 * [`SetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketPolicy)
 * [`GetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketPolicy)
 
 ### API Reference : Bucket notification Operations
+
 * [`SetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketNotification)
 * [`GetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketNotification)
 * [`RemoveAllBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveAllBucketNotification)
@@ -148,10 +197,12 @@ The full API Reference is available here.
 * [`ListenNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenNotification) (MinIO Extension)
 
 ### API Reference : File Object Operations
+
 * [`FPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject)
 * [`FGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#FGetObject)
 
 ### API Reference : Object Operations
+
 * [`GetObject`](https://min.io/docs/minio/linux/developers/go/API.html#GetObject)
 * [`PutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PutObject)
 * [`PutObjectStreaming`](https://min.io/docs/minio/linux/developers/go/API.html#PutObjectStreaming)
@@ -162,14 +213,15 @@ The full API Reference is available here.
 * [`RemoveIncompleteUpload`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveIncompleteUpload)
 * [`SelectObjectContent`](https://min.io/docs/minio/linux/developers/go/API.html#SelectObjectContent)
 
-
 ### API Reference : Presigned Operations
+
 * [`PresignedGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedGetObject)
 * [`PresignedPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPutObject)
 * [`PresignedHeadObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedHeadObject)
 * [`PresignedPostPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPostPolicy)
 
 ### API Reference : Client custom settings
+
 * [`SetAppInfo`](https://min.io/docs/minio/linux/developers/go/API.html#SetAppInfo)
 * [`TraceOn`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOn)
 * [`TraceOff`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOff)
@@ -177,6 +229,7 @@ The full API Reference is available here.
 ## Full Examples
 
 ### Full Examples : Bucket Operations
+
 * [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
 * [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
 * [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
@@ -186,25 +239,30 @@ The full API Reference is available here.
 * [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
 
 ### Full Examples : Bucket policy Operations
+
 * [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
 * [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
 * [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
 
 ### Full Examples : Bucket lifecycle Operations
+
 * [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go)
 * [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go)
 
 ### Full Examples : Bucket encryption Operations
+
 * [setbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketencryption.go)
 * [getbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketencryption.go)
 * [deletebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketencryption.go)
 
 ### Full Examples : Bucket replication Operations
+
 * [setbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketreplication.go)
 * [getbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketreplication.go)
 * [removebucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketreplication.go)
 
 ### Full Examples : Bucket notification Operations
+
 * [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
 * [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
 * [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
@@ -212,10 +270,12 @@ The full API Reference is available here.
 * [listennotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listen-notification.go) (MinIO Extension)
 
 ### Full Examples : File Object Operations
+
 * [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
 * [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
 
 ### Full Examples : Object Operations
+
 * [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
 * [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
 * [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
@@ -225,22 +285,28 @@ The full API Reference is available here.
 * [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
 
 ### Full Examples : Encrypted Object Operations
+
 * [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
 * [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
 * [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
 
 ### Full Examples : Presigned Operations
+
 * [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
 * [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
 * [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
 * [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
 
 ## Explore Further
+
+* [Godoc Documentation](https://pkg.go.dev/github.com/minio/minio-go/v7)
 * [Complete Documentation](https://min.io/docs/minio/kubernetes/upstream/index.html)
 * [MinIO Go Client SDK API Reference](https://min.io/docs/minio/linux/developers/go/API.html)
 
 ## Contribute
+
 [Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)
 
 ## License
+
 This SDK is distributed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](https://github.com/minio/minio-go/blob/master/LICENSE) and [NOTICE](https://github.com/minio/minio-go/blob/master/NOTICE) for more information.
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-attributes.go b/vendor/github.com/minio/minio-go/v7/api-get-object-attributes.go
new file mode 100644
index 0000000000000000000000000000000000000000..e1155c3723925cb9296088d2f41b066d4d1b695f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-get-object-attributes.go
@@ -0,0 +1,201 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"encoding/xml"
+	"errors"
+	"net/http"
+	"net/url"
+	"strconv"
+	"time"
+
+	"github.com/minio/minio-go/v7/pkg/encrypt"
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// ObjectAttributesOptions are options used for the GetObjectAttributes API
+//
+// - MaxParts
+// How many parts the caller wants to be returned (default: 1000)
+//
+// - VersionID
+// The object version you want the attributes for
+//
+// - PartNumberMarker
+// The listing will start AFTER the part matching PartNumberMarker
+//
+// - ServerSideEncryption
+// The server-side encryption algorithm used when storing this object in MinIO
+type ObjectAttributesOptions struct {
+	MaxParts             int
+	VersionID            string
+	PartNumberMarker     int
+	ServerSideEncryption encrypt.ServerSide
+}
+
+// ObjectAttributes is the response object returned by the GetObjectAttributes API
+//
+// - VersionID
+// The object version
+//
+// - LastModified
+// The last time the object was modified
+//
+// - ObjectAttributesResponse
+// Contains more information about the object
+type ObjectAttributes struct {
+	VersionID    string
+	LastModified time.Time
+	ObjectAttributesResponse
+}
+
+// ObjectAttributesResponse contains details returned by the GetObjectAttributes API
+//
+// Noteworthy fields:
+//
+// - ObjectParts.PartsCount
+// Contains the total part count for the object (not the current response)
+//
+// - ObjectParts.PartNumberMarker
+// Pagination of parts will begin at (but not include) PartNumberMarker
+//
+// - ObjectParts.NextPartNumberMarker
+// The next PartNumberMarker to be used in order to continue pagination
+//
+// - ObjectParts.IsTruncated
+// Indicates whether the list of parts was truncated, i.e. the last part of the object is not included in the response (this does not check whether parts are missing from the start of the list, ONLY the end)
+//
+// - ObjectParts.MaxParts
+// Reflects the MaxParts used by the caller or the default MaxParts value of the API
+type ObjectAttributesResponse struct {
+	ETag         string `xml:",omitempty"`
+	StorageClass string
+	ObjectSize   int
+	Checksum     struct {
+		ChecksumCRC32  string `xml:",omitempty"`
+		ChecksumCRC32C string `xml:",omitempty"`
+		ChecksumSHA1   string `xml:",omitempty"`
+		ChecksumSHA256 string `xml:",omitempty"`
+	}
+	ObjectParts struct {
+		PartsCount           int
+		PartNumberMarker     int
+		NextPartNumberMarker int
+		MaxParts             int
+		IsTruncated          bool
+		Parts                []*ObjectAttributePart `xml:"Part"`
+	}
+}
+
+// ObjectAttributePart is used by ObjectAttributesResponse to describe an object part
+type ObjectAttributePart struct {
+	ChecksumCRC32  string `xml:",omitempty"`
+	ChecksumCRC32C string `xml:",omitempty"`
+	ChecksumSHA1   string `xml:",omitempty"`
+	ChecksumSHA256 string `xml:",omitempty"`
+	PartNumber     int
+	Size           int
+}
+
+func (o *ObjectAttributes) parseResponse(resp *http.Response) (err error) {
+	mod, err := parseRFC7231Time(resp.Header.Get("Last-Modified"))
+	if err != nil {
+		return err
+	}
+	o.LastModified = mod
+	o.VersionID = resp.Header.Get(amzVersionID)
+
+	response := new(ObjectAttributesResponse)
+	if err := xml.NewDecoder(resp.Body).Decode(response); err != nil {
+		return err
+	}
+	o.ObjectAttributesResponse = *response
+
+	return
+}
+
+// GetObjectAttributes API combines HeadObject and ListParts.
+// More details on usage can be found in the documentation for ObjectAttributesOptions{}
+func (c *Client) GetObjectAttributes(ctx context.Context, bucketName, objectName string, opts ObjectAttributesOptions) (*ObjectAttributes, error) {
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return nil, err
+	}
+
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return nil, err
+	}
+
+	urlValues := make(url.Values)
+	urlValues.Add("attributes", "")
+	if opts.VersionID != "" {
+		urlValues.Add("versionId", opts.VersionID)
+	}
+
+	headers := make(http.Header)
+	headers.Set(amzObjectAttributes, GetObjectAttributesTags)
+
+	if opts.PartNumberMarker > 0 {
+		headers.Set(amzPartNumberMarker, strconv.Itoa(opts.PartNumberMarker))
+	}
+
+	if opts.MaxParts > 0 {
+		headers.Set(amzMaxParts, strconv.Itoa(opts.MaxParts))
+	} else {
+		headers.Set(amzMaxParts, strconv.Itoa(GetObjectAttributesMaxParts))
+	}
+
+	if opts.ServerSideEncryption != nil {
+		opts.ServerSideEncryption.Marshal(headers)
+	}
+
+	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		queryValues:      urlValues,
+		contentSHA256Hex: emptySHA256Hex,
+		customHeader:     headers,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	defer closeResponse(resp)
+
+	hasEtag := resp.Header.Get(ETag)
+	if hasEtag != "" {
+		return nil, errors.New("getObjectAttributes is not supported by the current endpoint version")
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		ER := new(ErrorResponse)
+		if err := xml.NewDecoder(resp.Body).Decode(ER); err != nil {
+			return nil, err
+		}
+
+		return nil, *ER
+	}
+
+	OA := new(ObjectAttributes)
+	err = OA.parseResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	return OA, nil
+}
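
A quick orientation for the new file above: `GetObjectAttributes` replaces a HeadObject-plus-ListParts round trip with a single `GET ?attributes` request. A minimal usage sketch follows; the endpoint, credentials, bucket, and object names are placeholders, and `MaxParts` falls back to `GetObjectAttributesMaxParts` (1000) when left at zero.

```go
// Sketch only: placeholder endpoint, credentials, and object names.
package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatalln(err)
	}

	attrs, err := client.GetObjectAttributes(context.Background(), "testbucket", "testdata",
		minio.ObjectAttributesOptions{
			MaxParts: 100, // 0 would fall back to GetObjectAttributesMaxParts (1000)
		})
	if err != nil {
		log.Fatalln(err)
	}

	log.Printf("etag=%s size=%d parts=%d truncated=%v",
		attrs.ETag, attrs.ObjectSize, attrs.ObjectParts.PartsCount, attrs.ObjectParts.IsTruncated)
}
```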
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object.go b/vendor/github.com/minio/minio-go/v7/api-get-object.go
index e31e4cf929e818906f4de1c180a34c2936207878..9e6b1543c4de5c6af0c7d8f0534d4d8751a4cff8 100644
--- a/vendor/github.com/minio/minio-go/v7/api-get-object.go
+++ b/vendor/github.com/minio/minio-go/v7/api-get-object.go
@@ -550,6 +550,8 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
 		}
 	}
 
+	newOffset := o.currOffset
+
 	// Switch through whence.
 	switch whence {
 	default:
@@ -558,12 +560,12 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
 		if o.objectInfo.Size > -1 && offset > o.objectInfo.Size {
 			return 0, io.EOF
 		}
-		o.currOffset = offset
+		newOffset = offset
 	case 1:
 		if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size {
 			return 0, io.EOF
 		}
-		o.currOffset += offset
+		newOffset += offset
 	case 2:
 		// If we don't know the object size return an error for io.SeekEnd
 		if o.objectInfo.Size < 0 {
@@ -579,7 +581,7 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
 		if o.objectInfo.Size+offset < 0 {
 			return 0, errInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence))
 		}
-		o.currOffset = o.objectInfo.Size + offset
+		newOffset = o.objectInfo.Size + offset
 	}
 	// Reset the saved error since we successfully seeked, let the Read
 	// and ReadAt decide.
@@ -587,8 +589,9 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
 		o.prevErr = nil
 	}
 
-	// Ask lower level to fetch again from source
-	o.seekData = true
+	// Ask lower level to fetch again from source when necessary
+	o.seekData = (newOffset != o.currOffset) || o.seekData
+	o.currOffset = newOffset
 
 	// Return the effective offset.
 	return o.currOffset, nil
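
The hunk above narrows when `seekData` is set: the stream is only marked for a fresh range request when the effective offset actually changes, so a pure position query no longer discards buffered data. A minimal sketch of that behavior, with placeholder endpoint, credentials, and names:

```go
// Sketch only: placeholder endpoint, credentials, and object names.
package main

import (
	"context"
	"io"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatalln(err)
	}

	obj, err := client.GetObject(context.Background(), "testbucket", "testdata", minio.GetObjectOptions{})
	if err != nil {
		log.Fatalln(err)
	}
	defer obj.Close()

	// With the patch, this no-op seek reports the position without
	// forcing the object stream to be re-fetched on the next Read.
	pos, err := obj.Seek(0, io.SeekCurrent)
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("current offset: %d", pos)

	// Moving to a different offset still triggers a fresh request.
	if _, err := obj.Seek(1024, io.SeekStart); err != nil {
		log.Fatalln(err)
	}
}
```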
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-options.go b/vendor/github.com/minio/minio-go/v7/api-get-options.go
index bb86a59941a9fefc3213b25b1cb85cb36339f991..a0216e2018bfc6304e9a2b668d740f5f991861e2 100644
--- a/vendor/github.com/minio/minio-go/v7/api-get-options.go
+++ b/vendor/github.com/minio/minio-go/v7/api-get-options.go
@@ -87,10 +87,10 @@ func (o *GetObjectOptions) Set(key, value string) {
 }
 
 // SetReqParam - set request query string parameter
-// supported key: see supportedQueryValues.
+// supported key: see supportedQueryValues and allowedCustomQueryPrefix.
 // If an unsupported key is passed in, it will be ignored and nothing will be done.
 func (o *GetObjectOptions) SetReqParam(key, value string) {
-	if !isStandardQueryValue(key) {
+	if !isCustomQueryValue(key) && !isStandardQueryValue(key) {
 		// do nothing
 		return
 	}
@@ -101,10 +101,10 @@ func (o *GetObjectOptions) SetReqParam(key, value string) {
 }
 
 // AddReqParam - add request query string parameter
-// supported key: see supportedQueryValues.
+// supported key: see supportedQueryValues and allowedCustomQueryPrefix.
 // If an unsupported key is passed in, it will be ignored and nothing will be done.
 func (o *GetObjectOptions) AddReqParam(key, value string) {
-	if !isStandardQueryValue(key) {
+	if !isCustomQueryValue(key) && !isStandardQueryValue(key) {
 		// do nothing
 		return
 	}
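
With the relaxed check above, `SetReqParam` and `AddReqParam` also accept custom query keys matched by `allowedCustomQueryPrefix`. A small sketch; treating `x-` as that prefix is an assumption here, since the constant is defined outside this diff:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7"
)

func main() {
	opts := minio.GetObjectOptions{}

	// Standard response-override key: accepted before and after this patch.
	opts.SetReqParam("response-content-type", "application/json")

	// Custom key: accepted only with the relaxed check. The "x-" prefix is
	// an assumption about allowedCustomQueryPrefix, which is not shown here.
	opts.SetReqParam("x-request-id", "debug-1234")

	// The options are then consumed by GetObject or PresignedGetObject as usual.
	fmt.Printf("options prepared: %+v\n", opts)
}
```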
diff --git a/vendor/github.com/minio/minio-go/v7/api-object-tagging.go b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go
index 305c36de856152cda5ca0bd2342cc262add8680a..6623e262a8b66d9cc32bf2ef2f1b11bcbf9527f0 100644
--- a/vendor/github.com/minio/minio-go/v7/api-object-tagging.go
+++ b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go
@@ -32,6 +32,12 @@ import (
 // to update tag(s) of a specific object version
 type PutObjectTaggingOptions struct {
 	VersionID string
+	Internal  AdvancedObjectTaggingOptions
+}
+
+// AdvancedObjectTaggingOptions for internal use by MinIO server - not intended for client use.
+type AdvancedObjectTaggingOptions struct {
+	ReplicationProxyRequest string
 }
 
 // PutObjectTagging replaces or creates object tag(s) and can target
@@ -50,7 +56,10 @@ func (c *Client) PutObjectTagging(ctx context.Context, bucketName, objectName st
 	if opts.VersionID != "" {
 		urlValues.Set("versionId", opts.VersionID)
 	}
-
+	headers := make(http.Header, 0)
+	if opts.Internal.ReplicationProxyRequest != "" {
+		headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest)
+	}
 	reqBytes, err := xml.Marshal(otags)
 	if err != nil {
 		return err
@@ -63,6 +72,7 @@ func (c *Client) PutObjectTagging(ctx context.Context, bucketName, objectName st
 		contentBody:      bytes.NewReader(reqBytes),
 		contentLength:    int64(len(reqBytes)),
 		contentMD5Base64: sumMD5Base64(reqBytes),
+		customHeader:     headers,
 	}
 
 	// Execute PUT to set a object tagging.
@@ -83,6 +93,7 @@ func (c *Client) PutObjectTagging(ctx context.Context, bucketName, objectName st
 // to fetch the tagging key/value pairs
 type GetObjectTaggingOptions struct {
 	VersionID string
+	Internal  AdvancedObjectTaggingOptions
 }
 
 // GetObjectTagging fetches object tag(s) with options to target
@@ -96,12 +107,16 @@ func (c *Client) GetObjectTagging(ctx context.Context, bucketName, objectName st
 	if opts.VersionID != "" {
 		urlValues.Set("versionId", opts.VersionID)
 	}
-
+	headers := make(http.Header, 0)
+	if opts.Internal.ReplicationProxyRequest != "" {
+		headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest)
+	}
 	// Execute GET on object to get object tag(s)
 	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
-		bucketName:  bucketName,
-		objectName:  objectName,
-		queryValues: urlValues,
+		bucketName:   bucketName,
+		objectName:   objectName,
+		queryValues:  urlValues,
+		customHeader: headers,
 	})
 
 	defer closeResponse(resp)
@@ -121,6 +136,7 @@ func (c *Client) GetObjectTagging(ctx context.Context, bucketName, objectName st
 // RemoveObjectTaggingOptions holds the version id of the object to remove
 type RemoveObjectTaggingOptions struct {
 	VersionID string
+	Internal  AdvancedObjectTaggingOptions
 }
 
 // RemoveObjectTagging removes object tag(s) with options to control a specific object
@@ -134,12 +150,16 @@ func (c *Client) RemoveObjectTagging(ctx context.Context, bucketName, objectName
 	if opts.VersionID != "" {
 		urlValues.Set("versionId", opts.VersionID)
 	}
-
+	headers := make(http.Header, 0)
+	if opts.Internal.ReplicationProxyRequest != "" {
+		headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest)
+	}
 	// Execute DELETE on object to remove object tag(s)
 	resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
-		bucketName:  bucketName,
-		objectName:  objectName,
-		queryValues: urlValues,
+		bucketName:   bucketName,
+		objectName:   objectName,
+		queryValues:  urlValues,
+		customHeader: headers,
 	})
 
 	defer closeResponse(resp)
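
Taken together, all three tagging entry points now carry an `Internal AdvancedObjectTaggingOptions` block, which the comment above reserves for MinIO server use; ordinary clients leave it zero-valued. A usage sketch with placeholder endpoint, credentials, and names, using the library's `pkg/tags` helpers:

```go
// Sketch only: placeholder endpoint, credentials, and object names.
package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/minio/minio-go/v7/pkg/tags"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatalln(err)
	}
	ctx := context.Background()

	otags, err := tags.NewTags(map[string]string{"team": "ops"}, true)
	if err != nil {
		log.Fatalln(err)
	}

	// Internal is left at its zero value: it is reserved for the MinIO server.
	if err := client.PutObjectTagging(ctx, "testbucket", "testdata", otags, minio.PutObjectTaggingOptions{}); err != nil {
		log.Fatalln(err)
	}

	got, err := client.GetObjectTagging(ctx, "testbucket", "testdata", minio.GetObjectTaggingOptions{})
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("tags:", got.ToMap())

	if err := client.RemoveObjectTagging(ctx, "testbucket", "testdata", minio.RemoveObjectTaggingOptions{}); err != nil {
		log.Fatalln(err)
	}
}
```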
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go
index 2c4de4f96bb43fd93d40087907ef3994db29edbf..bbd8924e28eeefab0c52e81bcd34ce4048a6f390 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go
@@ -77,6 +77,7 @@ type PutObjectOptions struct {
 	ContentDisposition      string
 	ContentLanguage         string
 	CacheControl            string
+	Expires                 time.Time
 	Mode                    RetentionMode
 	RetainUntilDate         time.Time
 	ServerSideEncryption    encrypt.ServerSide
@@ -153,6 +154,10 @@ func (opts PutObjectOptions) Header() (header http.Header) {
 		header.Set("Cache-Control", opts.CacheControl)
 	}
 
+	if !opts.Expires.IsZero() {
+		header.Set("Expires", opts.Expires.UTC().Format(http.TimeFormat))
+	}
+
 	if opts.Mode != "" {
 		header.Set(amzLockMode, opts.Mode.String())
 	}
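
The new `Expires` option maps directly onto the HTTP `Expires` header, formatted with `http.TimeFormat` as shown above, and is only sent when non-zero. A minimal sketch with placeholder endpoint, credentials, and names:

```go
// Sketch only: placeholder endpoint, credentials, and object names.
package main

import (
	"bytes"
	"context"
	"log"
	"time"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatalln(err)
	}

	body := bytes.NewReader([]byte("hello"))

	// Expires here marks the object, for caching purposes, as stale
	// 24 hours after upload; a zero time would omit the header entirely.
	_, err = client.PutObject(context.Background(), "testbucket", "hello.txt", body, int64(body.Len()),
		minio.PutObjectOptions{
			ContentType: "text/plain",
			Expires:     time.Now().Add(24 * time.Hour),
		})
	if err != nil {
		log.Fatalln(err)
	}
}
```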
diff --git a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
index 983ed674495767636df195f5a6667ebc01bc077f..eb4da41471b83db765fb818fe97363497756c653 100644
--- a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
+++ b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
@@ -24,6 +24,7 @@ import (
 	"context"
 	"fmt"
 	"io"
+	"net/http"
 	"os"
 	"strings"
 	"sync"
@@ -70,6 +71,14 @@ type SnowballObject struct {
 	// Exactly 'Size' number of bytes must be provided.
 	Content io.Reader
 
+	// VersionID of the object; if empty, a new versionID will be generated
+	VersionID string
+
+	// Headers contains more options for this object upload, the same as you
+	// would include in a regular PutObject operation, such as user metadata,
+	// content-disposition, expires, etc.
+	Headers http.Header
+
 	// Close will be called when an object has finished processing.
 	// Note that if PutObjectsSnowball returns because of an error,
 	// objects not consumed from the input will NOT have been closed.
@@ -181,6 +190,14 @@ objectLoop:
 				header.ModTime = time.Now().UTC()
 			}
 
+			header.PAXRecords = make(map[string]string)
+			if obj.VersionID != "" {
+				header.PAXRecords["minio.versionId"] = obj.VersionID
+			}
+			for k, vals := range obj.Headers {
+				header.PAXRecords["minio.metadata."+k] = strings.Join(vals, ",")
+			}
+
 			if err := t.WriteHeader(&header); err != nil {
 				closeObj()
 				return err
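
With the two new fields, each archived object can pin a version ID and carry extra headers, which the loop above forwards as `minio.versionId` and `minio.metadata.*` PAX records in the tar stream. A sketch under those assumptions; the `Key`, `Size`, and `ModTime` fields belong to `SnowballObject` outside this hunk, the names are placeholders, and pinning a version ID is typically only honored in replication-style deployments:

```go
// Sketch only: placeholder endpoint, credentials, and object names.
package main

import (
	"bytes"
	"context"
	"log"
	"net/http"
	"time"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatalln(err)
	}

	payload := []byte("hello")
	objs := make(chan minio.SnowballObject, 1)
	objs <- minio.SnowballObject{
		Key:     "docs/readme.txt",
		Size:    int64(len(payload)),
		ModTime: time.Now(),
		Content: bytes.NewReader(payload),
		// New in this patch: forwarded as the minio.versionId PAX record.
		VersionID: "00000000-0000-0000-0000-000000000000",
		// New in this patch: forwarded as minio.metadata.* PAX records.
		Headers: http.Header{"Content-Disposition": []string{"attachment"}},
	}
	close(objs)

	if err := client.PutObjectsSnowball(context.Background(), "testbucket", minio.SnowballOptions{}, objs); err != nil {
		log.Fatalln(err)
	}
}
```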
diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go
index 9c0ac449a4d48245c94a466cc8fe56d862f0f3e0..d2e932923f14e5ffef20f9c5434245cb967e813e 100644
--- a/vendor/github.com/minio/minio-go/v7/api-remove.go
+++ b/vendor/github.com/minio/minio-go/v7/api-remove.go
@@ -435,7 +435,7 @@ func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh
 
 		// Generate remove multi objects XML request
 		removeBytes := generateRemoveMultiObjectsRequest(batch)
-		// Execute GET on bucket to list objects.
+		// Execute POST on bucket to remove objects.
 		resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
 			bucketName:       bucketName,
 			queryValues:      urlValues,
diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go
index e8e324a9cc8898387989a77f81514274f5ebf73d..1c3cb83cdcc35bd5bbee1f6969e71c5d045d7cd3 100644
--- a/vendor/github.com/minio/minio-go/v7/api.go
+++ b/vendor/github.com/minio/minio-go/v7/api.go
@@ -127,7 +127,7 @@ type Options struct {
 // Global constants.
 const (
 	libraryName    = "minio-go"
-	libraryVersion = "v7.0.63"
+	libraryVersion = "v7.0.67"
 )
 
 // User Agent should always following the below style.
@@ -234,7 +234,7 @@ func privateNew(endpoint string, opts *Options) (*Client, error) {
 	clnt.httpClient = &http.Client{
 		Jar:       jar,
 		Transport: transport,
-		CheckRedirect: func(req *http.Request, via []*http.Request) error {
+		CheckRedirect: func(_ *http.Request, _ []*http.Request) error {
 			return http.ErrUseLastResponse
 		},
 	}
diff --git a/vendor/github.com/minio/minio-go/v7/constants.go b/vendor/github.com/minio/minio-go/v7/constants.go
index 401d2a74d2b04674a690936c547df1f9bd424ffb..4099a37f9a54e29c670ebe04b088827ad7423035 100644
--- a/vendor/github.com/minio/minio-go/v7/constants.go
+++ b/vendor/github.com/minio/minio-go/v7/constants.go
@@ -60,12 +60,32 @@ const (
 )
 
 const (
+	// GetObjectAttributesTags are tags used to define the
+	// return values for the GetObjectAttributes API
+	GetObjectAttributesTags = "ETag,Checksum,StorageClass,ObjectSize,ObjectParts"
+	// GetObjectAttributesMaxParts defines the default maximum
+	// number of parts returned by GetObjectAttributes
+	GetObjectAttributesMaxParts = 1000
+)
+
+const (
+	// Response Headers
+
+	// ETag is a common response header
+	ETag = "ETag"
+
 	// Storage class header.
 	amzStorageClass = "X-Amz-Storage-Class"
 
 	// Website redirect location header
 	amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location"
 
+	// GetObjectAttributes headers
+	amzPartNumberMarker    = "X-Amz-Part-Number-Marker"
+	amzExpectedBucketOnwer = "X-Amz-Expected-Bucket-Owner"
+	amzMaxParts            = "X-Amz-Max-Parts"
+	amzObjectAttributes    = "X-Amz-Object-Attributes"
+
 	// Object Tagging headers
 	amzTaggingHeader          = "X-Amz-Tagging"
 	amzTaggingHeaderDirective = "X-Amz-Tagging-Directive"
diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go
index f951cd073a9455fa87bbe9e4043517f21e304a39..a6a436c230c4af0df2898681892a4afa17e07163 100644
--- a/vendor/github.com/minio/minio-go/v7/functional_tests.go
+++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go
@@ -47,6 +47,7 @@ import (
 	"time"
 
 	"github.com/dustin/go-humanize"
+	"github.com/google/uuid"
 	jsoniter "github.com/json-iterator/go"
 	"github.com/minio/sha256-simd"
 	log "github.com/sirupsen/logrus"
@@ -66,13 +67,30 @@ const (
 )
 
 const (
-	serverEndpoint = "SERVER_ENDPOINT"
-	accessKey      = "ACCESS_KEY"
-	secretKey      = "SECRET_KEY"
-	enableHTTPS    = "ENABLE_HTTPS"
-	enableKMS      = "ENABLE_KMS"
+	serverEndpoint     = "SERVER_ENDPOINT"
+	accessKey          = "ACCESS_KEY"
+	secretKey          = "SECRET_KEY"
+	enableHTTPS        = "ENABLE_HTTPS"
+	enableKMS          = "ENABLE_KMS"
+	appVersion         = "0.1.0"
+	skipCERTValidation = "SKIP_CERT_VALIDATION"
 )
 
+func createHTTPTransport() (transport *http.Transport) {
+	var err error
+	transport, err = minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS)))
+	if err != nil {
+		logError("http-transport", getFuncName(), nil, time.Now(), "", "could not create http transport", err)
+		return nil
+	}
+
+	if mustParseBool(os.Getenv(skipCERTValidation)) {
+		transport.TLSClientConfig.InsecureSkipVerify = true
+	}
+
+	return
+}
+
 type mintJSONFormatter struct{}
 
 func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) {
@@ -425,8 +443,9 @@ func testMakeBucketError() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
+			Transport: createHTTPTransport(),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client creation failed", err)
@@ -437,7 +456,7 @@ func testMakeBucketError() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -478,14 +497,15 @@ func testMetadataSizeLimit() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
+			Transport: createHTTPTransport(),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client creation failed", err)
 		return
 	}
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -547,8 +567,9 @@ func testMakeBucketRegions() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client creation failed", err)
@@ -559,7 +580,7 @@ func testMakeBucketRegions() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -613,8 +634,9 @@ func testPutObjectReadAt() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -625,7 +647,7 @@ func testPutObjectReadAt() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -711,8 +733,9 @@ func testListObjectVersions() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -723,7 +746,7 @@ func testListObjectVersions() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -830,8 +853,9 @@ func testStatObjectWithVersioning() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -842,7 +866,7 @@ func testStatObjectWithVersioning() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -947,8 +971,9 @@ func testGetObjectWithVersioning() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -959,7 +984,7 @@ func testGetObjectWithVersioning() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -1086,8 +1111,9 @@ func testPutObjectWithVersioning() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -1098,7 +1124,7 @@ func testPutObjectWithVersioning() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -1233,8 +1259,9 @@ func testCopyObjectWithVersioning() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -1245,7 +1272,7 @@ func testCopyObjectWithVersioning() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -1370,8 +1397,9 @@ func testConcurrentCopyObjectWithVersioning() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -1382,7 +1410,7 @@ func testConcurrentCopyObjectWithVersioning() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -1530,8 +1558,9 @@ func testComposeObjectWithVersioning() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -1542,7 +1571,7 @@ func testComposeObjectWithVersioning() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -1670,8 +1699,9 @@ func testRemoveObjectWithVersioning() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -1682,7 +1712,7 @@ func testRemoveObjectWithVersioning() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -1782,8 +1812,9 @@ func testRemoveObjectsWithVersioning() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -1794,7 +1825,7 @@ func testRemoveObjectsWithVersioning() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -1877,8 +1908,9 @@ func testObjectTaggingWithVersioning() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -1889,7 +1921,7 @@ func testObjectTaggingWithVersioning() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -2044,8 +2076,9 @@ func testPutObjectWithChecksums() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -2056,7 +2089,7 @@ func testPutObjectWithChecksums() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -2238,8 +2271,9 @@ func testPutMultipartObjectWithChecksums() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -2250,7 +2284,7 @@ func testPutMultipartObjectWithChecksums() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -2408,6 +2442,7 @@ func testTrailingChecksums() {
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
 			Creds:           credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport:       createHTTPTransport(),
 			Secure:          mustParseBool(os.Getenv(enableHTTPS)),
 			TrailingHeaders: true,
 		})
@@ -2420,7 +2455,7 @@ func testTrailingChecksums() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -2621,6 +2656,7 @@ func testPutObjectWithAutomaticChecksums() {
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
 			Creds:           credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport:       createHTTPTransport(),
 			Secure:          mustParseBool(os.Getenv(enableHTTPS)),
 			TrailingHeaders: true,
 		})
@@ -2630,7 +2666,7 @@ func testPutObjectWithAutomaticChecksums() {
 	}
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -2754,6 +2790,558 @@ func testPutObjectWithAutomaticChecksums() {
 	successLogger(testName, function, args, startTime).Info()
 }
 
+func testGetObjectAttributes() {
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObjectAttributes(ctx, bucketName, objectName, opts)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+		"opts":       "minio.ObjectAttributesOptions{}",
+	}
+
+	if !isFullMode() {
+		ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info()
+		return
+	}
+
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			TrailingHeaders: true,
+			Creds:           credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport:       createHTTPTransport(),
+			Secure:          mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+	err = c.MakeBucket(
+		context.Background(),
+		bucketName,
+		minio.MakeBucketOptions{Region: "us-east-1"},
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Make bucket failed", err)
+		return
+	}
+
+	bucketNameV := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-versioned-")
+	args["bucketName"] = bucketNameV
+	err = c.MakeBucket(
+		context.Background(),
+		bucketNameV,
+		minio.MakeBucketOptions{Region: "us-east-1"},
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Make bucket failed", err)
+		return
+	}
+	err = c.EnableVersioning(context.Background(), bucketNameV)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Unable to enable versioning", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+	defer cleanupVersionedBucket(bucketNameV, c)
+
+	testFiles := make(map[string]*objectAttributesNewObject)
+	testFiles["file1"] = &objectAttributesNewObject{
+		Object:           "file1",
+		ObjectReaderType: "datafile-1.03-MB",
+		Bucket:           bucketNameV,
+		ContentType:      "custom/contenttype",
+		SendContentMd5:   false,
+	}
+
+	testFiles["file2"] = &objectAttributesNewObject{
+		Object:           "file2",
+		ObjectReaderType: "datafile-129-MB",
+		Bucket:           bucketName,
+		ContentType:      "custom/contenttype",
+		SendContentMd5:   false,
+	}
+
+	for i, v := range testFiles {
+		bufSize := dataFileMap[v.ObjectReaderType]
+
+		reader := getDataReader(v.ObjectReaderType)
+
+		args["objectName"] = v.Object
+		testFiles[i].UploadInfo, err = c.PutObject(context.Background(), v.Bucket, v.Object, reader, int64(bufSize), minio.PutObjectOptions{
+			ContentType:    v.ContentType,
+			SendContentMd5: v.SendContentMd5,
+		})
+
+		if err != nil {
+			logError(testName, function, args, startTime, "", "PutObject failed", err)
+			return
+		}
+	}
+
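+	// Each table entry pairs a set of ObjectAttributesOptions with the
+	// expectations for one uploaded file: the "versioned" case checks the
+	// single-part object, the others exercise part listing on the multipart
+	// upload.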
+	testTable := make(map[string]objectAttributesTableTest)
+
+	testTable["none-versioned"] = objectAttributesTableTest{
+		opts: minio.ObjectAttributesOptions{},
+		test: objectAttributesTestOptions{
+			TestFileName:     "file2",
+			StorageClass:     "STANDARD",
+			HasFullChecksum:  true,
+			HasPartChecksums: true,
+			HasParts:         true,
+		},
+	}
+
+	testTable["0-to-0-marker"] = objectAttributesTableTest{
+		opts: minio.ObjectAttributesOptions{
+			PartNumberMarker: 0,
+			MaxParts:         0,
+		},
+		test: objectAttributesTestOptions{
+			TestFileName:     "file2",
+			StorageClass:     "STANDARD",
+			HasFullChecksum:  true,
+			HasPartChecksums: true,
+			HasParts:         true,
+		},
+	}
+
+	testTable["0-marker-to-max"] = objectAttributesTableTest{
+		opts: minio.ObjectAttributesOptions{
+			PartNumberMarker: 0,
+			MaxParts:         10000,
+		},
+		test: objectAttributesTestOptions{
+			TestFileName:     "file2",
+			StorageClass:     "STANDARD",
+			HasFullChecksum:  true,
+			HasPartChecksums: true,
+			HasParts:         true,
+		},
+	}
+
+	testTable["0-to-1-marker"] = objectAttributesTableTest{
+		opts: minio.ObjectAttributesOptions{
+			PartNumberMarker: 0,
+			MaxParts:         1,
+		},
+		test: objectAttributesTestOptions{
+			TestFileName:     "file2",
+			StorageClass:     "STANDARD",
+			HasFullChecksum:  true,
+			HasPartChecksums: true,
+			HasParts:         true,
+		},
+	}
+
+	testTable["7-to-6-marker"] = objectAttributesTableTest{
+		opts: minio.ObjectAttributesOptions{
+			PartNumberMarker: 7,
+			MaxParts:         6,
+		},
+		test: objectAttributesTestOptions{
+			TestFileName:     "file2",
+			StorageClass:     "STANDARD",
+			HasFullChecksum:  true,
+			HasPartChecksums: true,
+			HasParts:         true,
+		},
+	}
+
+	testTable["versioned"] = objectAttributesTableTest{
+		opts: minio.ObjectAttributesOptions{},
+		test: objectAttributesTestOptions{
+			TestFileName:    "file1",
+			StorageClass:    "STANDARD",
+			HasFullChecksum: false,
+		},
+	}
+
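+	// For every table test: look up the uploaded file, pin its version ID
+	// when one was assigned, fetch the attributes and validate them.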
+	for i, v := range testTable {
+
+		tf, ok := testFiles[v.test.TestFileName]
+		if !ok {
+			continue
+		}
+
+		args["objectName"] = tf.Object
+		args["bucketName"] = tf.Bucket
+		if tf.UploadInfo.VersionID != "" {
+			v.opts.VersionID = tf.UploadInfo.VersionID
+		}
+
+		s, err := c.GetObjectAttributes(context.Background(), tf.Bucket, tf.Object, v.opts)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "GetObjectAttributes failed", err)
+			return
+		}
+
+		v.test.NumberOfParts = s.ObjectParts.PartsCount
+		v.test.ETag = tf.UploadInfo.ETag
+		v.test.ObjectSize = int(tf.UploadInfo.Size)
+
+		err = validateObjectAttributeRequest(s, &v.opts, &v.test)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "Validating GetObjectAttributes response failed, table test: "+i, err)
+			return
+		}
+
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
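+// testGetObjectAttributesSSECEncryption uploads a two-part SSE-C encrypted
+// object and checks that GetObjectAttributes, invoked with the same
+// encryption key, reports the parts while exposing no checksums.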
+func testGetObjectAttributesSSECEncryption() {
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObjectAttributes(ctx, bucketName, objectName, opts)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+		"opts":       "minio.ObjectAttributesOptions{}",
+	}
+
+	if !isFullMode() {
+		ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info()
+		return
+	}
+
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			TrailingHeaders: true,
+			Creds:           credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure:          mustParseBool(os.Getenv(enableHTTPS)),
+			Transport:       createHTTPTransport(),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+	err = c.MakeBucket(
+		context.Background(),
+		bucketName,
+		minio.MakeBucketOptions{Region: "us-east-1"},
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Make bucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	objectName := "encrypted-object"
+	args["objectName"] = objectName
+	bufSize := dataFileMap["datafile-11-MB"]
+	reader := getDataReader("datafile-11-MB")
+
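+	// Derive the SSE-C key material from a passphrase and a per-object salt;
+	// the same ServerSideEncryption value must be supplied again when
+	// querying the attributes of the encrypted object.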
+	sse := encrypt.DefaultPBKDF([]byte("word1 word2 word3 word4"), []byte(bucketName+objectName))
+
+	info, err := c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{
+		ContentType:          "content/custom",
+		SendContentMd5:       true,
+		ServerSideEncryption: sse,
+		PartSize:             uint64(bufSize) / 2,
+	})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	opts := minio.ObjectAttributesOptions{
+		ServerSideEncryption: sse,
+	}
+	attr, err := c.GetObjectAttributes(context.Background(), bucketName, objectName, opts)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObjectAttributes failed", err)
+		return
+	}
+	err = validateObjectAttributeRequest(attr, &opts, &objectAttributesTestOptions{
+		TestFileName:     info.Key,
+		ETag:             info.ETag,
+		NumberOfParts:    2,
+		ObjectSize:       int(info.Size),
+		HasFullChecksum:  false,
+		HasParts:         true,
+		HasPartChecksums: false,
+	})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Validating GetObjectAttributes response failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
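+// testGetObjectAttributesErrorCases covers the error paths of
+// GetObjectAttributes: unknown buckets and objects must yield NoSuchBucket
+// and NoSuchKey, empty bucket or object names must be rejected, and an
+// unknown version ID on a versioned bucket must yield NoSuchVersion.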
+func testGetObjectAttributesErrorCases() {
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObjectAttributes(ctx, bucketName, objectName, opts)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+		"opts":       "minio.ObjectAttributesOptions{}",
+	}
+
+	if !isFullMode() {
+		ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info()
+		return
+	}
+
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			TrailingHeaders: true,
+			Creds:           credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport:       createHTTPTransport(),
+			Secure:          mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+	unknownBucket := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-bucket-")
+	unknownObject := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-object-")
+
+	_, err = c.GetObjectAttributes(context.Background(), unknownBucket, unknownObject, minio.ObjectAttributesOptions{})
+	if err == nil {
+		logError(testName, function, args, startTime, "", "GetObjectAttributes on a nonexistent bucket should have failed", nil)
+		return
+	}
+
+	errorResponse := minio.ToErrorResponse(err)
+	if errorResponse.Code != "NoSuchBucket" {
+		logError(testName, function, args, startTime, "", "Invalid error code, expected NoSuchBucket but got "+errorResponse.Code, nil)
+		return
+	}
+
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+	err = c.MakeBucket(
+		context.Background(),
+		bucketName,
+		minio.MakeBucketOptions{Region: "us-east-1"},
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Make bucket failed", err)
+		return
+	}
+
+	bucketNameV := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-versioned-")
+	args["bucketName"] = bucketNameV
+	err = c.MakeBucket(
+		context.Background(),
+		bucketNameV,
+		minio.MakeBucketOptions{Region: "us-east-1"},
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Make bucket failed", err)
+		return
+	}
+	err = c.EnableVersioning(context.Background(), bucketNameV)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Unable to enable versioning", err)
+		return
+	}
+	defer cleanupBucket(bucketName, c)
+	defer cleanupVersionedBucket(bucketNameV, c)
+
+	_, err = c.GetObjectAttributes(context.Background(), bucketName, unknownObject, minio.ObjectAttributesOptions{})
+	if err == nil {
+		logError(testName, function, args, startTime, "", "GetObjectAttributes on a nonexistent object should have failed", nil)
+		return
+	}
+
+	errorResponse = minio.ToErrorResponse(err)
+	if errorResponse.Code != "NoSuchKey" {
+		logError(testName, function, args, startTime, "", "Invalid error code, expected NoSuchKey but got "+errorResponse.Code, nil)
+		return
+	}
+
+	_, err = c.GetObjectAttributes(context.Background(), bucketName, "", minio.ObjectAttributesOptions{})
+	if err == nil {
+		logError(testName, function, args, startTime, "", "GetObjectAttributes with empty object name should have failed", nil)
+		return
+	}
+
+	_, err = c.GetObjectAttributes(context.Background(), "", unknownObject, minio.ObjectAttributesOptions{})
+	if err == nil {
+		logError(testName, function, args, startTime, "", "GetObjectAttributes with empty bucket name should have failed", nil)
+		return
+	}
+
+	_, err = c.GetObjectAttributes(context.Background(), bucketNameV, unknownObject, minio.ObjectAttributesOptions{
+		VersionID: uuid.NewString(),
+	})
+	if err == nil {
+		logError(testName, function, args, startTime, "", "GetObjectAttributes with an unknown version ID should have failed", nil)
+		return
+	}
+	errorResponse = minio.ToErrorResponse(err)
+	if errorResponse.Code != "NoSuchVersion" {
+		logError(testName, function, args, startTime, "", "Invalid error code, expected NoSuchVersion but got "+errorResponse.Code, nil)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
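+// objectAttributesNewObject describes one upload performed by the tests and
+// retains its UploadInfo so responses can be compared to what was stored.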
+type objectAttributesNewObject struct {
+	Object           string
+	ObjectReaderType string
+	Bucket           string
+	ContentType      string
+	SendContentMd5   bool
+	UploadInfo       minio.UploadInfo
+}
+
+type objectAttributesTableTest struct {
+	opts minio.ObjectAttributesOptions
+	test objectAttributesTestOptions
+}
+
+type objectAttributesTestOptions struct {
+	TestFileName     string
+	ETag             string
+	NumberOfParts    int
+	StorageClass     string
+	ObjectSize       int
+	HasPartChecksums bool
+	HasFullChecksum  bool
+	HasParts         bool
+}
+
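+// validateObjectAttributeRequest compares a GetObjectAttributes response
+// with the stated expectations: version ID, per-part and full-object
+// checksums, ETag, storage class, object size, the slice of parts returned
+// for the requested window, and the truncation flag.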
+func validateObjectAttributeRequest(OA *minio.ObjectAttributes, opts *minio.ObjectAttributesOptions, test *objectAttributesTestOptions) (err error) {
+	if opts.VersionID != "" {
+		if OA.VersionID != opts.VersionID {
+			err = fmt.Errorf("Expected versionId %s but got versionId %s", opts.VersionID, OA.VersionID)
+			return
+		}
+	}
+
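+	// Inspect every returned part: a part may carry any one of the four
+	// supported checksum algorithms. Track both whether any part lacked a
+	// checksum and whether any part had one, so either expectation can fail.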
+	partsMissingChecksum := false
+	foundPartChecksum := false
+	for _, v := range OA.ObjectParts.Parts {
+		checksumFound := v.ChecksumSHA256 != "" || v.ChecksumSHA1 != "" ||
+			v.ChecksumCRC32 != "" || v.ChecksumCRC32C != ""
+		if checksumFound {
+			foundPartChecksum = true
+		} else {
+			partsMissingChecksum = true
+		}
+	}
+
+	if test.HasPartChecksums {
+		if partsMissingChecksum {
+			err = fmt.Errorf("One or more parts were missing a checksum")
+			return
+		}
+	} else {
+		if foundPartChecksum {
+			err = fmt.Errorf("Did not expect ObjectParts to have checksums but found one")
+			return
+		}
+	}
+
+	hasFullObjectChecksum := OA.Checksum.ChecksumCRC32 != "" ||
+		OA.Checksum.ChecksumCRC32C != "" ||
+		OA.Checksum.ChecksumSHA1 != "" ||
+		OA.Checksum.ChecksumSHA256 != ""
+
+	if test.HasFullChecksum {
+		if !hasFullObjectChecksum {
+			err = fmt.Errorf("Full object checksum not found")
+			return
+		}
+	} else {
+		if hasFullObjectChecksum {
+			err = fmt.Errorf("Did not expect a full object checksum but we got one")
+			return
+		}
+	}
+
+	if OA.ETag != test.ETag {
+		err = fmt.Errorf("ETags do not match, got %s but expected %s", OA.ETag, test.ETag)
+		return
+	}
+
+	if test.HasParts {
+		if len(OA.ObjectParts.Parts) < 1 {
+			err = fmt.Errorf("Was expecting ObjectParts but none were present")
+			return
+		}
+	}
+
+	if OA.StorageClass == "" {
+		err = fmt.Errorf("Was expecting a StorageClass but got none")
+		return
+	}
+
+	if OA.ObjectSize != test.ObjectSize {
+		err = fmt.Errorf("ObjectSize does not match, expected %d but got %d", test.ObjectSize, OA.ObjectSize)
+		return
+	}
+
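+	// Validate the size of the returned parts window. Assumed semantics: a
+	// zero MaxParts returns every part; when MaxParts plus the marker
+	// overshoots the part count, only the remainder after the marker comes
+	// back; otherwise exactly MaxParts parts are expected.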
+	if test.HasParts {
+		if opts.MaxParts == 0 {
+			if len(OA.ObjectParts.Parts) != OA.ObjectParts.PartsCount {
+				err = fmt.Errorf("expected %d parts but got %d", OA.ObjectParts.PartsCount, len(OA.ObjectParts.Parts))
+				return
+			}
+		} else if (opts.MaxParts + opts.PartNumberMarker) > OA.ObjectParts.PartsCount {
+			if len(OA.ObjectParts.Parts) != (OA.ObjectParts.PartsCount - opts.PartNumberMarker) {
+				err = fmt.Errorf("expected %d parts but got %d", (OA.ObjectParts.PartsCount - opts.PartNumberMarker), len(OA.ObjectParts.Parts))
+				return
+			}
+		} else if opts.MaxParts != 0 {
+			if opts.MaxParts != len(OA.ObjectParts.Parts) {
+				err = fmt.Errorf("expected %d parts but got %d", opts.MaxParts, len(OA.ObjectParts.Parts))
+				return
+			}
+		}
+	}
+
+	if OA.ObjectParts.NextPartNumberMarker == OA.ObjectParts.PartsCount {
+		if OA.ObjectParts.IsTruncated {
+			err = fmt.Errorf("Expected ObjectParts to NOT be truncated, but it was")
+			return
+		}
+	} else if !OA.ObjectParts.IsTruncated {
+		err = fmt.Errorf("Expected ObjectParts to be truncated, but it was NOT")
+		return
+	}
+
+	return
+}
+
 // Test PutObject using a large data to trigger multipart readat
 func testPutObjectWithMetadata() {
 	// initialize logging params
@@ -2777,8 +3365,9 @@ func testPutObjectWithMetadata() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -2789,7 +3378,7 @@ func testPutObjectWithMetadata() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -2883,8 +3472,9 @@ func testPutObjectWithContentLanguage() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -2895,7 +3485,7 @@ func testPutObjectWithContentLanguage() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -2952,8 +3542,9 @@ func testPutObjectStreaming() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -2964,7 +3555,7 @@ func testPutObjectStreaming() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -3023,8 +3614,9 @@ func testGetObjectSeekEnd() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -3035,7 +3627,7 @@ func testGetObjectSeekEnd() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -3145,8 +3737,9 @@ func testGetObjectClosedTwice() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -3157,7 +3750,7 @@ func testGetObjectClosedTwice() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -3235,8 +3828,9 @@ func testRemoveObjectsContext() {
 	// Instantiate new minio client.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -3244,7 +3838,7 @@ func testRemoveObjectsContext() {
 	}
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 	// Enable tracing, write to stdout.
 	// c.TraceOn(os.Stderr)
 
@@ -3331,8 +3925,9 @@ func testRemoveMultipleObjects() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -3340,7 +3935,7 @@ func testRemoveMultipleObjects() {
 	}
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Enable tracing, write to stdout.
 	// c.TraceOn(os.Stderr)
@@ -3414,8 +4009,9 @@ func testRemoveMultipleObjectsWithResult() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -3423,7 +4019,7 @@ func testRemoveMultipleObjectsWithResult() {
 	}
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Enable tracing, write to stdout.
 	// c.TraceOn(os.Stderr)
@@ -3549,8 +4145,9 @@ func testFPutObjectMultipart() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -3561,7 +4158,7 @@ func testFPutObjectMultipart() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -3654,8 +4251,9 @@ func testFPutObject() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -3666,7 +4264,7 @@ func testFPutObject() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -3823,8 +4421,9 @@ func testFPutObjectContext() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -3835,7 +4434,7 @@ func testFPutObjectContext() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -3923,8 +4522,9 @@ func testFPutObjectContextV2() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -3935,7 +4535,7 @@ func testFPutObjectContextV2() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -4024,8 +4624,9 @@ func testPutObjectContext() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -4036,7 +4637,7 @@ func testPutObjectContext() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Make a new bucket.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -4096,8 +4697,9 @@ func testGetObjectS3Zip() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -4108,7 +4710,7 @@ func testGetObjectS3Zip() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -4279,8 +4881,9 @@ func testGetObjectReadSeekFunctional() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -4291,7 +4894,7 @@ func testGetObjectReadSeekFunctional() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -4448,8 +5051,9 @@ func testGetObjectReadAtFunctional() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -4460,7 +5064,7 @@ func testGetObjectReadAtFunctional() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -4625,8 +5229,9 @@ func testGetObjectReadAtWhenEOFWasReached() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -4637,7 +5242,7 @@ func testGetObjectReadAtWhenEOFWasReached() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -4744,8 +5349,9 @@ func testPresignedPostPolicy() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -4756,7 +5362,7 @@ func testPresignedPostPolicy() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -4965,8 +5571,9 @@ func testCopyObject() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -4977,7 +5584,7 @@ func testCopyObject() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -5159,8 +5766,9 @@ func testSSECEncryptedGetObjectReadSeekFunctional() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -5171,7 +5779,7 @@ func testSSECEncryptedGetObjectReadSeekFunctional() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -5341,8 +5949,9 @@ func testSSES3EncryptedGetObjectReadSeekFunctional() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -5353,7 +5962,7 @@ func testSSES3EncryptedGetObjectReadSeekFunctional() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -5521,8 +6130,9 @@ func testSSECEncryptedGetObjectReadAtFunctional() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -5533,7 +6143,7 @@ func testSSECEncryptedGetObjectReadAtFunctional() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -5704,8 +6314,9 @@ func testSSES3EncryptedGetObjectReadAtFunctional() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -5716,7 +6327,7 @@ func testSSES3EncryptedGetObjectReadAtFunctional() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -5888,8 +6499,9 @@ func testSSECEncryptionPutGet() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -5900,7 +6512,7 @@ func testSSECEncryptionPutGet() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -5997,8 +6609,9 @@ func testSSECEncryptionFPut() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -6009,7 +6622,7 @@ func testSSECEncryptionFPut() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -6119,8 +6732,9 @@ func testSSES3EncryptionPutGet() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -6131,7 +6745,7 @@ func testSSES3EncryptionPutGet() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -6226,8 +6840,9 @@ func testSSES3EncryptionFPut() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -6238,7 +6853,7 @@ func testSSES3EncryptionFPut() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -6353,8 +6968,9 @@ func testBucketNotification() {
 
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -6365,7 +6981,7 @@ func testBucketNotification() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	bucketName := os.Getenv("NOTIFY_BUCKET")
 	args["bucketName"] = bucketName
@@ -6447,8 +7063,9 @@ func testFunctional() {
 
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, nil, startTime, "", "MinIO client object creation failed", err)
@@ -6459,7 +7076,7 @@ func testFunctional() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -7134,8 +7751,9 @@ func testGetObjectModified() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -7146,7 +7764,7 @@ func testGetObjectModified() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Make a new bucket.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -7229,8 +7847,9 @@ func testPutObjectUploadSeekedObject() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -7241,7 +7860,7 @@ func testPutObjectUploadSeekedObject() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Make a new bucket.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -7351,8 +7970,9 @@ func testMakeBucketErrorV2() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
@@ -7363,7 +7983,7 @@ func testMakeBucketErrorV2() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -7410,8 +8030,9 @@ func testGetObjectClosedTwiceV2() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
@@ -7422,7 +8043,7 @@ func testGetObjectClosedTwiceV2() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -7500,8 +8121,9 @@ func testFPutObjectV2() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
@@ -7512,7 +8134,7 @@ func testFPutObjectV2() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -7660,8 +8282,9 @@ func testMakeBucketRegionsV2() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
@@ -7672,7 +8295,7 @@ func testMakeBucketRegionsV2() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -7722,8 +8345,9 @@ func testGetObjectReadSeekFunctionalV2() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
@@ -7734,7 +8358,7 @@ func testGetObjectReadSeekFunctionalV2() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -7876,8 +8500,9 @@ func testGetObjectReadAtFunctionalV2() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
@@ -7888,7 +8513,7 @@ func testGetObjectReadAtFunctionalV2() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -8037,8 +8662,9 @@ func testCopyObjectV2() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
@@ -8049,7 +8675,7 @@ func testCopyObjectV2() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -8252,8 +8878,9 @@ func testComposeObjectErrorCasesV2() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
@@ -8349,8 +8976,9 @@ func testCompose10KSourcesV2() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
@@ -8370,8 +8998,9 @@ func testEncryptedEmptyObject() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
@@ -8630,8 +9259,9 @@ func testUnencryptedToSSECCopyObject() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
@@ -8656,8 +9286,9 @@ func testUnencryptedToSSES3CopyObject() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
@@ -8683,8 +9314,9 @@ func testUnencryptedToUnencryptedCopyObject() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
@@ -8709,8 +9341,9 @@ func testEncryptedSSECToSSECCopyObject() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
@@ -8736,8 +9369,9 @@ func testEncryptedSSECToSSES3CopyObject() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
@@ -8763,8 +9397,9 @@ func testEncryptedSSECToUnencryptedCopyObject() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
@@ -8790,8 +9425,9 @@ func testEncryptedSSES3ToSSECCopyObject() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
@@ -8817,8 +9453,9 @@ func testEncryptedSSES3ToSSES3CopyObject() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
@@ -8844,8 +9481,9 @@ func testEncryptedSSES3ToUnencryptedCopyObject() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
@@ -8871,8 +9509,9 @@ func testEncryptedCopyObjectV2() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
@@ -8897,8 +9536,9 @@ func testDecryptedCopyObject() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
@@ -8956,8 +9596,9 @@ func testSSECMultipartEncryptedToSSECCopyObjectPart() {
 	// Instantiate new minio client object
 	client, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
@@ -8971,7 +9612,7 @@ func testSSECMultipartEncryptedToSSECCopyObjectPart() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
@@ -9153,8 +9794,9 @@ func testSSECEncryptedToSSECCopyObjectPart() {
 	// Instantiate new minio client object
 	client, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
@@ -9168,7 +9810,7 @@ func testSSECEncryptedToSSECCopyObjectPart() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
@@ -9330,8 +9972,9 @@ func testSSECEncryptedToUnencryptedCopyPart() {
 	// Instantiate new minio client object
 	client, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
@@ -9345,7 +9988,7 @@ func testSSECEncryptedToUnencryptedCopyPart() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
@@ -9506,8 +10149,9 @@ func testSSECEncryptedToSSES3CopyObjectPart() {
 	// Instantiate new minio client object
 	client, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
@@ -9521,7 +10165,7 @@ func testSSECEncryptedToSSES3CopyObjectPart() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
@@ -9685,8 +10329,9 @@ func testUnencryptedToSSECCopyObjectPart() {
 	// Instantiate new minio client object
 	client, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
@@ -9700,7 +10345,7 @@ func testUnencryptedToSSECCopyObjectPart() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
@@ -9859,8 +10504,9 @@ func testUnencryptedToUnencryptedCopyPart() {
 	// Instantiate new minio client object
 	client, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
@@ -9874,7 +10520,7 @@ func testUnencryptedToUnencryptedCopyPart() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
@@ -10029,8 +10675,9 @@ func testUnencryptedToSSES3CopyObjectPart() {
 	// Instantiate new minio client object
 	client, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
@@ -10044,7 +10691,7 @@ func testUnencryptedToSSES3CopyObjectPart() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
@@ -10201,8 +10848,9 @@ func testSSES3EncryptedToSSECCopyObjectPart() {
 	// Instantiate new minio client object
 	client, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
@@ -10216,7 +10864,7 @@ func testSSES3EncryptedToSSECCopyObjectPart() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
@@ -10376,8 +11024,9 @@ func testSSES3EncryptedToUnencryptedCopyPart() {
 	// Instantiate new minio client object
 	client, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
@@ -10391,7 +11040,7 @@ func testSSES3EncryptedToUnencryptedCopyPart() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
@@ -10547,8 +11196,9 @@ func testSSES3EncryptedToSSES3CopyObjectPart() {
 	// Instantiate new minio client object
 	client, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
@@ -10562,7 +11212,7 @@ func testSSES3EncryptedToSSES3CopyObjectPart() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
@@ -10720,8 +11370,9 @@ func testUserMetadataCopying() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -10896,8 +11547,9 @@ func testUserMetadataCopyingV2() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
@@ -10918,8 +11570,9 @@ func testStorageClassMetadataPutObject() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
@@ -11005,8 +11658,9 @@ func testStorageClassInvalidMetadataPutObject() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
@@ -11047,8 +11701,9 @@ func testStorageClassMetadataCopyObject() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
@@ -11176,8 +11831,9 @@ func testPutObjectNoLengthV2() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
@@ -11188,7 +11844,7 @@ func testPutObjectNoLengthV2() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -11251,8 +11907,9 @@ func testPutObjectsUnknownV2() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
@@ -11263,7 +11920,7 @@ func testPutObjectsUnknownV2() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -11341,8 +11998,9 @@ func testPutObject0ByteV2() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
@@ -11353,7 +12011,7 @@ func testPutObject0ByteV2() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -11402,8 +12060,9 @@ func testComposeObjectErrorCases() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -11424,8 +12083,9 @@ func testCompose10KSources() {
 	// Instantiate new minio client object
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -11449,8 +12109,9 @@ func testFunctionalV2() {
 
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
@@ -11461,7 +12122,7 @@ func testFunctionalV2() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -11909,8 +12570,9 @@ func testGetObjectContext() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
@@ -11921,7 +12583,7 @@ func testGetObjectContext() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -12011,8 +12673,9 @@ func testFGetObjectContext() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
@@ -12023,7 +12686,7 @@ func testFGetObjectContext() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -12099,8 +12762,9 @@ func testGetObjectRanges() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
@@ -12111,7 +12775,7 @@ func testGetObjectRanges() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rng, "minio-go-test-")
@@ -12208,8 +12872,9 @@ func testGetObjectACLContext() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
@@ -12220,7 +12885,7 @@ func testGetObjectACLContext() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -12383,8 +13048,9 @@ func testPutObjectContextV2() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
@@ -12395,7 +13061,7 @@ func testPutObjectContextV2() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Make a new bucket.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -12457,8 +13123,9 @@ func testGetObjectContextV2() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
@@ -12469,7 +13136,7 @@ func testGetObjectContextV2() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -12557,8 +13224,9 @@ func testFGetObjectContextV2() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
@@ -12569,7 +13237,7 @@ func testFGetObjectContextV2() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -12645,8 +13313,9 @@ func testListObjects() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
@@ -12657,7 +13326,7 @@ func testListObjects() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -12745,8 +13414,9 @@ func testRemoveObjects() {
 	// Instantiate new minio client object.
 	c, err := minio.New(os.Getenv(serverEndpoint),
 		&minio.Options{
-			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
-			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
 		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
@@ -12757,7 +13427,7 @@ func testRemoveObjects() {
 	// c.TraceOn(os.Stderr)
 
 	// Set user agent.
-	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -12891,6 +13561,8 @@ func main() {
 
 	// execute tests
 	if isFullMode() {
+		testGetObjectAttributes()
+		testGetObjectAttributesErrorCases()
 		testMakeBucketErrorV2()
 		testGetObjectClosedTwiceV2()
 		testFPutObjectV2()
@@ -12962,6 +13634,7 @@ func main() {
 
 		// SSE-C tests will only work over TLS connection.
 		if tls {
+			testGetObjectAttributesSSECEncryption()
 			testSSECEncryptionPutGet()
 			testSSECEncryptionFPut()
 			testSSECEncryptedGetObjectReadAtFunctional()
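Every client constructed in these tests now supplies Transport: createHTTPTransport(). The helper's definition lands elsewhere in this patch and is not part of this excerpt; the sketch below shows the shape such a helper can take, assuming the suite's existing mustParseBool/logError/getFuncName helpers and a hypothetical skipCERTValidation env-var constant:

func createHTTPTransport() (transport *http.Transport) {
	var err error
	// Start from minio-go's default transport for the configured scheme.
	transport, err = minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS)))
	if err != nil {
		logError("http-transport", getFuncName(), nil, time.Now(), "", "could not create http transport", err)
		return nil
	}
	// Optionally accept self-signed certificates on test endpoints.
	if transport.TLSClientConfig != nil && mustParseBool(os.Getenv(skipCERTValidation)) {
		transport.TLSClientConfig.InsecureSkipVerify = true
	}
	return transport
}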
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
index 1c73d1008b4dc77e724fa00bd76fc5147043049c..800c4a294a5d485cd5fbf638ef821f2756599cc7 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
@@ -93,7 +93,8 @@ type STSAssumeRoleOptions struct {
 	AccessKey string
 	SecretKey string
 
-	Policy string // Optional to assign a policy to the assumed role
+	SessionToken string // Optional if the first request is made with temporary credentials.
+	Policy       string // Optional to assign a policy to the assumed role
 
 	Location        string // Optional commonly needed with AWS STS.
 	DurationSeconds int    // Optional defaults to 1 hour.
@@ -101,6 +102,7 @@ type STSAssumeRoleOptions struct {
 	// Optional only valid if using with AWS STS
 	RoleARN         string
 	RoleSessionName string
+	ExternalID      string
 }
 
 // NewSTSAssumeRole returns a pointer to a new
@@ -161,6 +163,9 @@ func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssume
 	if opts.Policy != "" {
 		v.Set("Policy", opts.Policy)
 	}
+	if opts.ExternalID != "" {
+		v.Set("ExternalId", opts.ExternalID)
+	}
 
 	u, err := url.Parse(endpoint)
 	if err != nil {
@@ -181,6 +186,9 @@ func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssume
 	}
 	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
 	req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(hash.Sum(nil)))
+	if opts.SessionToken != "" {
+		req.Header.Set("X-Amz-Security-Token", opts.SessionToken)
+	}
 	req = signer.SignV4STS(*req, opts.AccessKey, opts.SecretKey, opts.Location)
 
 	resp, err := clnt.Do(req)
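The new SessionToken and ExternalID options plug into the same request path shown above; a hedged usage sketch with placeholder ARN and endpoint values:

creds, err := credentials.NewSTSAssumeRole("https://sts.amazonaws.com",
	credentials.STSAssumeRoleOptions{
		AccessKey:       os.Getenv("AWS_ACCESS_KEY_ID"),
		SecretKey:       os.Getenv("AWS_SECRET_ACCESS_KEY"),
		SessionToken:    os.Getenv("AWS_SESSION_TOKEN"), // forwarded as X-Amz-Security-Token when set
		RoleARN:         "arn:aws:iam::123456789012:role/example", // placeholder
		RoleSessionName: "example-session",
		ExternalID:      "example-external-id", // sent as the ExternalId form value
	})
if err != nil {
	// handle credential construction failure
}
_ = creds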
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
index 0c9536debbae441bc108990f9ef86ac66cbcad84..c5153c4caceac30b3b8f98249bb9b3a748c690e6 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
@@ -54,19 +54,36 @@ type IAM struct {
 
 	// Custom endpoint to fetch IAM role credentials.
 	Endpoint string
+
+	// Region is an optional custom region for STS
+	Region string
+
+	// Support for container authorization token https://docs.aws.amazon.com/sdkref/latest/guide/feature-container-credentials.html
+	Container struct {
+		AuthorizationToken     string
+		CredentialsFullURI     string
+		CredentialsRelativeURI string
+	}
+
+	// EKS-based Kubernetes RBAC authorization - https://docs.aws.amazon.com/eks/latest/userguide/pod-configuration.html
+	EKSIdentity struct {
+		TokenFile       string
+		RoleARN         string
+		RoleSessionName string
+	}
 }
 
 // IAM Roles for Amazon EC2
 // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
 const (
-	defaultIAMRoleEndpoint      = "http://169.254.169.254"
-	defaultECSRoleEndpoint      = "http://169.254.170.2"
-	defaultSTSRoleEndpoint      = "https://sts.amazonaws.com"
-	defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials/"
-	tokenRequestTTLHeader       = "X-aws-ec2-metadata-token-ttl-seconds"
-	tokenPath                   = "/latest/api/token"
-	tokenTTL                    = "21600"
-	tokenRequestHeader          = "X-aws-ec2-metadata-token"
+	DefaultIAMRoleEndpoint      = "http://169.254.169.254"
+	DefaultECSRoleEndpoint      = "http://169.254.170.2"
+	DefaultSTSRoleEndpoint      = "https://sts.amazonaws.com"
+	DefaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials/"
+	TokenRequestTTLHeader       = "X-aws-ec2-metadata-token-ttl-seconds"
+	TokenPath                   = "/latest/api/token"
+	TokenTTL                    = "21600"
+	TokenRequestHeader          = "X-aws-ec2-metadata-token"
 )
 
 // NewIAM returns a pointer to a new Credentials object wrapping the IAM.
@@ -84,21 +101,55 @@ func NewIAM(endpoint string) *Credentials {
 // the desired
 func (m *IAM) Retrieve() (Value, error) {
 	token := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN")
+	if token == "" {
+		token = m.Container.AuthorizationToken
+	}
+
+	relativeURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")
+	if relativeURI == "" {
+		relativeURI = m.Container.CredentialsRelativeURI
+	}
+
+	fullURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")
+	if fullURI == "" {
+		fullURI = m.Container.CredentialsFullURI
+	}
+
+	identityFile := os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")
+	if identityFile == "" {
+		identityFile = m.EKSIdentity.TokenFile
+	}
+
+	roleArn := os.Getenv("AWS_ROLE_ARN")
+	if roleArn == "" {
+		roleArn = m.EKSIdentity.RoleARN
+	}
+
+	roleSessionName := os.Getenv("AWS_ROLE_SESSION_NAME")
+	if roleSessionName == "" {
+		roleSessionName = m.EKSIdentity.RoleSessionName
+	}
+
+	region := os.Getenv("AWS_REGION")
+	if region == "" {
+		region = m.Region
+	}
+
 	var roleCreds ec2RoleCredRespBody
 	var err error
 
 	endpoint := m.Endpoint
 	switch {
-	case len(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")) > 0:
+	case identityFile != "":
 		if len(endpoint) == 0 {
-			if len(os.Getenv("AWS_REGION")) > 0 {
-				if strings.HasPrefix(os.Getenv("AWS_REGION"), "cn-") {
-					endpoint = "https://sts." + os.Getenv("AWS_REGION") + ".amazonaws.com.cn"
+			if region != "" {
+				if strings.HasPrefix(region, "cn-") {
+					endpoint = "https://sts." + region + ".amazonaws.com.cn"
 				} else {
-					endpoint = "https://sts." + os.Getenv("AWS_REGION") + ".amazonaws.com"
+					endpoint = "https://sts." + region + ".amazonaws.com"
 				}
 			} else {
-				endpoint = defaultSTSRoleEndpoint
+				endpoint = DefaultSTSRoleEndpoint
 			}
 		}
 
@@ -106,15 +157,15 @@ func (m *IAM) Retrieve() (Value, error) {
 			Client:      m.Client,
 			STSEndpoint: endpoint,
 			GetWebIDTokenExpiry: func() (*WebIdentityToken, error) {
-				token, err := os.ReadFile(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE"))
+				token, err := os.ReadFile(identityFile)
 				if err != nil {
 					return nil, err
 				}
 
 				return &WebIdentityToken{Token: string(token)}, nil
 			},
-			RoleARN:         os.Getenv("AWS_ROLE_ARN"),
-			roleSessionName: os.Getenv("AWS_ROLE_SESSION_NAME"),
+			RoleARN:         roleArn,
+			roleSessionName: roleSessionName,
 		}
 
 		stsWebIdentityCreds, err := creds.Retrieve()
@@ -123,17 +174,16 @@ func (m *IAM) Retrieve() (Value, error) {
 		}
 		return stsWebIdentityCreds, err
 
-	case len(os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")) > 0:
+	case relativeURI != "":
 		if len(endpoint) == 0 {
-			endpoint = fmt.Sprintf("%s%s", defaultECSRoleEndpoint,
-				os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"))
+			endpoint = fmt.Sprintf("%s%s", DefaultECSRoleEndpoint, relativeURI)
 		}
 
 		roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
 
-	case len(os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")) > 0:
+	case fullURI != "":
 		if len(endpoint) == 0 {
-			endpoint = os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")
+			endpoint = fullURI
 			var ok bool
 			if ok, err = isLoopback(endpoint); !ok {
 				if err == nil {
@@ -189,7 +239,7 @@ func getIAMRoleURL(endpoint string) (*url.URL, error) {
 	if err != nil {
 		return nil, err
 	}
-	u.Path = defaultIAMSecurityCredsPath
+	u.Path = DefaultIAMSecurityCredsPath
 	return u, nil
 }
 
@@ -203,7 +253,7 @@ func listRoleNames(client *http.Client, u *url.URL, token string) ([]string, err
 		return nil, err
 	}
 	if token != "" {
-		req.Header.Add(tokenRequestHeader, token)
+		req.Header.Add(TokenRequestHeader, token)
 	}
 	resp, err := client.Do(req)
 	if err != nil {
@@ -258,11 +308,11 @@ func fetchIMDSToken(client *http.Client, endpoint string) (string, error) {
 	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
 	defer cancel()
 
-	req, err := http.NewRequestWithContext(ctx, http.MethodPut, endpoint+tokenPath, nil)
+	req, err := http.NewRequestWithContext(ctx, http.MethodPut, endpoint+TokenPath, nil)
 	if err != nil {
 		return "", err
 	}
-	req.Header.Add(tokenRequestTTLHeader, tokenTTL)
+	req.Header.Add(TokenRequestTTLHeader, TokenTTL)
 	resp, err := client.Do(req)
 	if err != nil {
 		return "", err
@@ -285,7 +335,7 @@ func fetchIMDSToken(client *http.Client, endpoint string) (string, error) {
 // reading the response an error will be returned.
 func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) {
 	if endpoint == "" {
-		endpoint = defaultIAMRoleEndpoint
+		endpoint = DefaultIAMRoleEndpoint
 	}
 
 	// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
@@ -332,7 +382,7 @@ func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody,
 		return ec2RoleCredRespBody{}, err
 	}
 	if token != "" {
-		req.Header.Add(tokenRequestHeader, token)
+		req.Header.Add(TokenRequestHeader, token)
 	}
 
 	resp, err := client.Do(req)
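With the new fields, the IAM provider can be configured programmatically; the environment variables still take precedence when both are present. A sketch with placeholder values:

provider := &credentials.IAM{
	Client: &http.Client{Timeout: 10 * time.Second},
	Region: "us-east-1", // consulted only when AWS_REGION is unset
}
// Programmatic fallbacks for the AWS_WEB_IDENTITY_* variables:
provider.EKSIdentity.TokenFile = "/var/run/secrets/eks.amazonaws.com/serviceaccount/token"
provider.EKSIdentity.RoleARN = "arn:aws:iam::123456789012:role/eks-pod" // placeholder
provider.EKSIdentity.RoleSessionName = "example-session"
creds := credentials.New(provider)
_ = creds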
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
index 830061b8e0b55f906ac1a18b880f5b4c23f46177..10c95ffe52d226e62ca1f2edb3958f938c8cf246 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
@@ -211,35 +211,43 @@ func (t Transition) MarshalXML(en *xml.Encoder, startElement xml.StartElement) e
 
 // And And Rule for LifecycleTag, to be used in LifecycleRuleFilter
 type And struct {
-	XMLName xml.Name `xml:"And" json:"-"`
-	Prefix  string   `xml:"Prefix" json:"Prefix,omitempty"`
-	Tags    []Tag    `xml:"Tag" json:"Tags,omitempty"`
+	XMLName               xml.Name `xml:"And" json:"-"`
+	Prefix                string   `xml:"Prefix" json:"Prefix,omitempty"`
+	Tags                  []Tag    `xml:"Tag" json:"Tags,omitempty"`
+	ObjectSizeLessThan    int64    `xml:"ObjectSizeLessThan,omitempty" json:"ObjectSizeLessThan,omitempty"`
+	ObjectSizeGreaterThan int64    `xml:"ObjectSizeGreaterThan,omitempty" json:"ObjectSizeGreaterThan,omitempty"`
 }
 
 // IsEmpty returns true if Tags field is null
 func (a And) IsEmpty() bool {
-	return len(a.Tags) == 0 && a.Prefix == ""
+	return len(a.Tags) == 0 && a.Prefix == "" &&
+		a.ObjectSizeLessThan == 0 && a.ObjectSizeGreaterThan == 0
 }
 
 // Filter will be used in selecting rule(s) for lifecycle configuration
 type Filter struct {
-	XMLName xml.Name `xml:"Filter" json:"-"`
-	And     And      `xml:"And,omitempty" json:"And,omitempty"`
-	Prefix  string   `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
-	Tag     Tag      `xml:"Tag,omitempty" json:"Tag,omitempty"`
+	XMLName               xml.Name `xml:"Filter" json:"-"`
+	And                   And      `xml:"And,omitempty" json:"And,omitempty"`
+	Prefix                string   `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
+	Tag                   Tag      `xml:"Tag,omitempty" json:"Tag,omitempty"`
+	ObjectSizeLessThan    int64    `xml:"ObjectSizeLessThan,omitempty" json:"ObjectSizeLessThan,omitempty"`
+	ObjectSizeGreaterThan int64    `xml:"ObjectSizeGreaterThan,omitempty" json:"ObjectSizeGreaterThan,omitempty"`
 }
 
 // IsNull returns true if all Filter fields are empty.
 func (f Filter) IsNull() bool {
-	return f.Tag.IsEmpty() && f.And.IsEmpty() && f.Prefix == ""
+	return f.Tag.IsEmpty() && f.And.IsEmpty() && f.Prefix == "" &&
+		f.ObjectSizeLessThan == 0 && f.ObjectSizeGreaterThan == 0
 }
 
 // MarshalJSON customizes json encoding by removing empty values.
 func (f Filter) MarshalJSON() ([]byte, error) {
 	type filter struct {
-		And    *And   `json:"And,omitempty"`
-		Prefix string `json:"Prefix,omitempty"`
-		Tag    *Tag   `json:"Tag,omitempty"`
+		And                   *And   `json:"And,omitempty"`
+		Prefix                string `json:"Prefix,omitempty"`
+		Tag                   *Tag   `json:"Tag,omitempty"`
+		ObjectSizeLessThan    int64  `json:"ObjectSizeLessThan,omitempty"`
+		ObjectSizeGreaterThan int64  `json:"ObjectSizeGreaterThan,omitempty"`
 	}
 
 	newf := filter{
@@ -251,6 +259,8 @@ func (f Filter) MarshalJSON() ([]byte, error) {
 	if !f.And.IsEmpty() {
 		newf.And = &f.And
 	}
+	newf.ObjectSizeLessThan = f.ObjectSizeLessThan
+	newf.ObjectSizeGreaterThan = f.ObjectSizeGreaterThan
 	return json.Marshal(newf)
 }
 
@@ -271,7 +281,19 @@ func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
 			return err
 		}
 	default:
-		// Always print Prefix field when both And & Tag are empty
+		if f.ObjectSizeLessThan > 0 {
+			if err := e.EncodeElement(f.ObjectSizeLessThan, xml.StartElement{Name: xml.Name{Local: "ObjectSizeLessThan"}}); err != nil {
+				return err
+			}
+			break
+		}
+		if f.ObjectSizeGreaterThan > 0 {
+			if err := e.EncodeElement(f.ObjectSizeGreaterThan, xml.StartElement{Name: xml.Name{Local: "ObjectSizeGreaterThan"}}); err != nil {
+				return err
+			}
+			break
+		}
+		// Print empty Prefix field only when everything else is empty
 		if err := e.EncodeElement(f.Prefix, xml.StartElement{Name: xml.Name{Local: "Prefix"}}); err != nil {
 			return err
 		}
@@ -380,7 +402,7 @@ func (e Expiration) IsDeleteMarkerExpirationEnabled() bool {
 
 // IsNull returns true if both date and days fields are null
 func (e Expiration) IsNull() bool {
-	return e.IsDaysNull() && e.IsDateNull() && !e.IsDeleteMarkerExpirationEnabled()
+	return e.IsDaysNull() && e.IsDateNull() && !e.IsDeleteMarkerExpirationEnabled() && !e.DeleteAll.IsEnabled()
 }
 
 // MarshalXML marshals Expiration only when it is non-null
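The size-based fields compose with the existing prefix and tag filters; a sketch of a rule that expires only objects above 1 MiB under a prefix (names are placeholders):

cfg := lifecycle.NewConfiguration()
cfg.Rules = []lifecycle.Rule{{
	ID:     "expire-large-logs",
	Status: "Enabled",
	RuleFilter: lifecycle.Filter{
		And: lifecycle.And{
			Prefix:                "logs/",
			ObjectSizeGreaterThan: 1 << 20, // bytes
		},
	},
	Expiration: lifecycle.Expiration{Days: 30},
}}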
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
index 01cc26fc2e437c6262249eb82beba2c17d16fa5f..a44799d246a77320babea7a7a7bd08a531b98f2c 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
@@ -37,9 +37,15 @@ const (
 	ObjectCreatedPut                                   EventType = "s3:ObjectCreated:Put"
 	ObjectCreatedPost                                  EventType = "s3:ObjectCreated:Post"
 	ObjectCreatedCopy                                  EventType = "s3:ObjectCreated:Copy"
+	ObjectCreatedDeleteTagging                         EventType = "s3:ObjectCreated:DeleteTagging"
 	ObjectCreatedCompleteMultipartUpload               EventType = "s3:ObjectCreated:CompleteMultipartUpload"
+	ObjectCreatedPutLegalHold                          EventType = "s3:ObjectCreated:PutLegalHold"
+	ObjectCreatedPutRetention                          EventType = "s3:ObjectCreated:PutRetention"
+	ObjectCreatedPutTagging                            EventType = "s3:ObjectCreated:PutTagging"
 	ObjectAccessedGet                                  EventType = "s3:ObjectAccessed:Get"
 	ObjectAccessedHead                                 EventType = "s3:ObjectAccessed:Head"
+	ObjectAccessedGetRetention                         EventType = "s3:ObjectAccessed:GetRetention"
+	ObjectAccessedGetLegalHold                         EventType = "s3:ObjectAccessed:GetLegalHold"
 	ObjectAccessedAll                                  EventType = "s3:ObjectAccessed:*"
 	ObjectRemovedAll                                   EventType = "s3:ObjectRemoved:*"
 	ObjectRemovedDelete                                EventType = "s3:ObjectRemoved:Delete"
@@ -56,6 +62,9 @@ const (
 	ObjectReplicationOperationMissedThreshold          EventType = "s3:Replication:OperationMissedThreshold"
 	ObjectReplicationOperationNotTracked               EventType = "s3:Replication:OperationNotTracked"
 	ObjectReplicationOperationReplicatedAfterThreshold EventType = "s3:Replication:OperationReplicatedAfterThreshold"
+	ObjectScannerManyVersions                          EventType = "s3:Scanner:ManyVersions"
+	ObjectScannerBigPrefix                             EventType = "s3:Scanner:BigPrefix"
+	ObjectScannerAll                                   EventType = "s3:Scanner:*"
 	BucketCreatedAll                                   EventType = "s3:BucketCreated:*"
 	BucketRemovedAll                                   EventType = "s3:BucketRemoved:*"
 )
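The scanner events register like any other EventType; a hedged sketch assuming a webhook-style queue ARN:

arn := notification.NewArn("minio", "sqs", "", "1", "webhook") // placeholder ARN parts
queueCfg := notification.NewConfig(arn)
queueCfg.AddEvents(notification.ObjectScannerManyVersions, notification.ObjectScannerBigPrefix)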
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go
index c35e58e1ac9f00d840da2972420a49806e081b0d..2566a3df76fc0e770888060277d63df3f20e5eaa 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go
@@ -149,22 +149,19 @@ func (set StringSet) MarshalJSON() ([]byte, error) {
 }
 
 // UnmarshalJSON - parses JSON data and creates new set with it.
-// If 'data' contains JSON string array, the set contains each string.
-// If 'data' contains JSON string, the set contains the string as one element.
-// If 'data' contains Other JSON types, JSON parse error is returned.
 func (set *StringSet) UnmarshalJSON(data []byte) error {
-	sl := []string{}
+	sl := []interface{}{}
 	var err error
 	if err = json.Unmarshal(data, &sl); err == nil {
 		*set = make(StringSet)
 		for _, s := range sl {
-			set.Add(s)
+			set.Add(fmt.Sprintf("%v", s))
 		}
 	} else {
-		var s string
+		var s interface{}
 		if err = json.Unmarshal(data, &s); err == nil {
 			*set = make(StringSet)
-			set.Add(s)
+			set.Add(fmt.Sprintf("%v", s))
 		}
 	}
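Non-string JSON scalars are now coerced through fmt.Sprintf rather than rejected; a small sketch of the effect:

var s set.StringSet
// Numbers and booleans unmarshal as their string forms.
if err := json.Unmarshal([]byte(`[1, "two", true]`), &s); err == nil {
	fmt.Println(s.Contains("1"), s.Contains("true")) // true true
}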
 
diff --git a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go
index 0a26edd5a785312b54fc151ea4d37c4e1d14ff54..b1de7b62a473af5d8480a9f240cb16645c605d7f 100644
--- a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go
+++ b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go
@@ -50,6 +50,7 @@ var awsS3EndpointMap = map[string]string{
 	"cn-northwest-1": "s3.dualstack.cn-northwest-1.amazonaws.com.cn",
 	"ap-southeast-3": "s3.dualstack.ap-southeast-3.amazonaws.com",
 	"ap-southeast-4": "s3.dualstack.ap-southeast-4.amazonaws.com",
+	"il-central-1":   "s3.dualstack.il-central-1.amazonaws.com",
 }
 
 // getS3Endpoint get Amazon S3 endpoint based on the bucket location.
diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go
index 6a93561ea7aa1ade1520959f1d56e60e3cd1ef47..94c19b2a5714a811e08e0426920d2d580d54e45e 100644
--- a/vendor/github.com/minio/minio-go/v7/utils.go
+++ b/vendor/github.com/minio/minio-go/v7/utils.go
@@ -21,6 +21,7 @@ import (
 	"context"
 	"crypto/md5"
 	fipssha256 "crypto/sha256"
+	"crypto/tls"
 	"encoding/base64"
 	"encoding/hex"
 	"encoding/xml"
@@ -513,6 +514,7 @@ func isAmzHeader(headerKey string) bool {
 
 // supportedQueryValues is a list of query strings that can be passed in when using GetObject.
 var supportedQueryValues = map[string]bool{
+	"attributes":                   true,
 	"partNumber":                   true,
 	"versionId":                    true,
 	"response-cache-control":       true,
@@ -528,6 +530,14 @@ func isStandardQueryValue(qsKey string) bool {
 	return supportedQueryValues[qsKey]
 }
 
+// Per the documentation at https://docs.aws.amazon.com/AmazonS3/latest/userguide/LogFormat.html#LogFormatCustom,
+// query params starting with "x-" are ignored by S3.
+const allowedCustomQueryPrefix = "x-"
+
+func isCustomQueryValue(qsKey string) bool {
+	return strings.HasPrefix(qsKey, allowedCustomQueryPrefix)
+}
+
 var (
 	md5Pool    = sync.Pool{New: func() interface{} { return md5.New() }}
 	sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }}
@@ -614,7 +624,7 @@ func IsNetworkOrHostDown(err error, expectTimeouts bool) bool {
 	urlErr := &url.Error{}
 	if errors.As(err, &urlErr) {
 		switch urlErr.Err.(type) {
-		case *net.DNSError, *net.OpError, net.UnknownNetworkError:
+		case *net.DNSError, *net.OpError, net.UnknownNetworkError, *tls.CertificateVerificationError:
 			return true
 		}
 	}
@@ -641,7 +651,12 @@ func IsNetworkOrHostDown(err error, expectTimeouts bool) bool {
 	case strings.Contains(err.Error(), "connection refused"):
 		// If err is connection refused
 		return true
-
+	case strings.Contains(err.Error(), "server gave HTTP response to HTTPS client"):
+		// If err is a TLS client used against a plain-HTTP server
+		return true
+	case strings.Contains(err.Error(), "Client sent an HTTP request to an HTTPS server"):
+		// If err is a plain-text client used against an HTTPS server
+		return true
 	case strings.Contains(strings.ToLower(err.Error()), "503 service unavailable"):
 		// Denial errors
 		return true
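The two added matches make HTTP/HTTPS scheme mismatches count as connection-level failures; a sketch of how a caller sees it (endpoint is a placeholder):

_, err := http.Get("https://127.0.0.1:9000/") // plain-HTTP listener behind an HTTPS URL
if err != nil && minio.IsNetworkOrHostDown(err, false) {
	// classified as host/network down; safe to retry with backoff
}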
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/api.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/api.go
index c71697c124826b55f92dab76b2f627192971911a..0bbc0d7250c65fb7f328086f9463fd1d0aa58a84 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/api.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/api.go
@@ -28,6 +28,9 @@ const (
 	FakeType                     = "fake"
 	KafkaType                    = "kafka"
 	S3Type                       = "s3"
+	OtlpLogsType                 = "otlplogs"
+	OtlpMetricsType              = "otlpmetrics"
+	OtlpTracesType               = "otlptraces"
 	StdoutType                   = "stdout"
 	LokiType                     = "loki"
 	IpfixType                    = "ipfix"
@@ -45,6 +48,7 @@ const (
 	AddLocationRuleType          = "add_location"
 	AddServiceRuleType           = "add_service"
 	AddKubernetesRuleType        = "add_kubernetes"
+	AddKubernetesInfraRuleType   = "add_kubernetes_infra"
 	ReinterpretDirectionRuleType = "reinterpret_direction"
 	PromFilterExact              = "exact"
 	PromFilterPresence           = "presence"
@@ -59,19 +63,22 @@ const (
 // Note: items beginning with doc: "## title" are top level items that get divided into sections inside api.md.
 
 type API struct {
-	PromEncode         PromEncode       `yaml:"prom" doc:"## Prometheus encode API\nFollowing is the supported API format for prometheus encode:\n"`
-	KafkaEncode        EncodeKafka      `yaml:"kafka" doc:"## Kafka encode API\nFollowing is the supported API format for kafka encode:\n"`
-	S3Encode           EncodeS3         `yaml:"s3" doc:"## S3 encode API\nFollowing is the supported API format for S3 encode:\n"`
-	IngestCollector    IngestCollector  `yaml:"collector" doc:"## Ingest collector API\nFollowing is the supported API format for the NetFlow / IPFIX collector:\n"`
-	IngestKafka        IngestKafka      `yaml:"kafka" doc:"## Ingest Kafka API\nFollowing is the supported API format for the kafka ingest:\n"`
-	IngestGRPCProto    IngestGRPCProto  `yaml:"grpc" doc:"## Ingest GRPC from Network Observability eBPF Agent\nFollowing is the supported API format for the Network Observability eBPF ingest:\n"`
-	IngestStdin        IngestStdin      `yaml:"stdin" doc:"## Ingest Standard Input\nFollowing is the supported API format for the standard input ingest:\n"`
-	TransformGeneric   TransformGeneric `yaml:"generic" doc:"## Transform Generic API\nFollowing is the supported API format for generic transformations:\n"`
-	TransformFilter    TransformFilter  `yaml:"filter" doc:"## Transform Filter API\nFollowing is the supported API format for filter transformations:\n"`
-	TransformNetwork   TransformNetwork `yaml:"network" doc:"## Transform Network API\nFollowing is the supported API format for network transformations:\n"`
-	WriteLoki          WriteLoki        `yaml:"loki" doc:"## Write Loki API\nFollowing is the supported API format for writing to loki:\n"`
-	WriteStdout        WriteStdout      `yaml:"stdout" doc:"## Write Standard Output\nFollowing is the supported API format for writing to standard output:\n"`
-	ExtractAggregate   Aggregates       `yaml:"aggregates" doc:"## Aggregate metrics API\nFollowing is the supported API format for specifying metrics aggregations:\n"`
-	ConnectionTracking ConnTrack        `yaml:"conntrack" doc:"## Connection tracking API\nFollowing is the supported API format for specifying connection tracking:\n"`
-	ExtractTimebased   ExtractTimebased `yaml:"timebased" doc:"## Time-based Filters API\nFollowing is the supported API format for specifying metrics time-based filters:\n"`
+	PromEncode         PromEncode        `yaml:"prom" doc:"## Prometheus encode API\nFollowing is the supported API format for prometheus encode:\n"`
+	KafkaEncode        EncodeKafka       `yaml:"kafka" doc:"## Kafka encode API\nFollowing is the supported API format for kafka encode:\n"`
+	S3Encode           EncodeS3          `yaml:"s3" doc:"## S3 encode API\nFollowing is the supported API format for S3 encode:\n"`
+	IngestCollector    IngestCollector   `yaml:"collector" doc:"## Ingest collector API\nFollowing is the supported API format for the NetFlow / IPFIX collector:\n"`
+	IngestKafka        IngestKafka       `yaml:"kafka" doc:"## Ingest Kafka API\nFollowing is the supported API format for the kafka ingest:\n"`
+	IngestGRPCProto    IngestGRPCProto   `yaml:"grpc" doc:"## Ingest GRPC from Network Observability eBPF Agent\nFollowing is the supported API format for the Network Observability eBPF ingest:\n"`
+	IngestStdin        IngestStdin       `yaml:"stdin" doc:"## Ingest Standard Input\nFollowing is the supported API format for the standard input ingest:\n"`
+	TransformGeneric   TransformGeneric  `yaml:"generic" doc:"## Transform Generic API\nFollowing is the supported API format for generic transformations:\n"`
+	TransformFilter    TransformFilter   `yaml:"filter" doc:"## Transform Filter API\nFollowing is the supported API format for filter transformations:\n"`
+	TransformNetwork   TransformNetwork  `yaml:"network" doc:"## Transform Network API\nFollowing is the supported API format for network transformations:\n"`
+	WriteLoki          WriteLoki         `yaml:"loki" doc:"## Write Loki API\nFollowing is the supported API format for writing to loki:\n"`
+	WriteStdout        WriteStdout       `yaml:"stdout" doc:"## Write Standard Output\nFollowing is the supported API format for writing to standard output:\n"`
+	ExtractAggregate   Aggregates        `yaml:"aggregates" doc:"## Aggregate metrics API\nFollowing is the supported API format for specifying metrics aggregations:\n"`
+	ConnectionTracking ConnTrack         `yaml:"conntrack" doc:"## Connection tracking API\nFollowing is the supported API format for specifying connection tracking:\n"`
+	ExtractTimebased   ExtractTimebased  `yaml:"timebased" doc:"## Time-based Filters API\nFollowing is the supported API format for specifying metrics time-based filters:\n"`
+	EncodeOtlpLogs     EncodeOtlpLogs    `yaml:"otlplogs" doc:"## OpenTelemetry Logs API\nFollowing is the supported API format for writing logs to an OpenTelemetry collector:\n"`
+	EncodeOtlpMetrics  EncodeOtlpMetrics `yaml:"otlpmetrics" doc:"## OpenTelemetry Metrics API\nFollowing is the supported API format for writing metrics to an OpenTelemetry collector:\n"`
+	EncodeOtlpTraces   EncodeOtlpTraces  `yaml:"otlptraces" doc:"## OpenTelemetry Traces API\nFollowing is the supported API format for writing traces to an OpenTelemetry collector:\n"`
 }
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/encode_otlp.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/encode_otlp.go
new file mode 100644
index 0000000000000000000000000000000000000000..86e3c32c008d39b8f528ea1133621180f3b614b6
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/encode_otlp.go
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2023 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package api
+
+type EncodeOtlpLogs struct {
+	*OtlpConnectionInfo `json:",inline" doc:"OpenTelemetry connection info; includes:"`
+}
+
+type EncodeOtlpTraces struct {
+	*OtlpConnectionInfo `json:",inline" doc:"OpenTelemetry connection info; includes:"`
+	SpanSplitter        []string `yaml:"spanSplitter,omitempty" json:"spanSplitter,omitempty" doc:"separate span for each prefix listed"`
+}
+
+type EncodeOtlpMetrics struct {
+	*OtlpConnectionInfo `json:",inline" doc:"OpenTelemetry connection info; includes:"`
+	Prefix              string       `yaml:"prefix,omitempty" json:"prefix,omitempty" doc:"prefix added to each metric name"`
+	Metrics             MetricsItems `yaml:"metrics,omitempty" json:"metrics,omitempty" doc:"list of metric definitions, each includes:"`
+	PushTimeInterval    Duration     `yaml:"pushTimeInterval,omitempty" json:"pushTimeInterval,omitempty" doc:"how often metrics should be sent to the collector"`
+	ExpiryTime          Duration     `yaml:"expiryTime,omitempty" json:"expiryTime,omitempty" doc:"time duration of no-flow to wait before deleting data item"`
+}
+
+type OtlpConnectionInfo struct {
+	Address        string            `yaml:"address" json:"address" doc:"endpoint address to connect to"`
+	Port           int               `yaml:"port" json:"port" doc:"endpoint port number to connect to"`
+	ConnectionType string            `yaml:"connectionType" json:"connectionType" doc:"interface mechanism: either http or grpc"`
+	TLS            *ClientTLS        `yaml:"tls,omitempty" json:"tls,omitempty" doc:"TLS configuration for the endpoint"`
+	Headers        map[string]string `yaml:"headers,omitempty" json:"headers,omitempty" doc:"headers to add to messages (optional)"`
+}
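+
+// Illustrative stage configuration consuming this API, as a YAML sketch. This is
+// not verified against the config loader; the stage type string is assumed to
+// match the yaml tags above, and the field values are examples only:
+//
+//	encode:
+//	  type: otlpmetrics
+//	  otlpmetrics:
+//	    address: otel-collector.example
+//	    port: 4317
+//	    connectionType: grpc
+//	    prefix: flp_
+//	    pushTimeInterval: 20s
+//	    metrics:
+//	      - name: flows_total
+//	        type: counter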
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/encode_prom.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/encode_prom.go
index 530aed26f5fe3e6ff22a4def89956adec10ec2b3..07213d613810b8b83a478433a1a9831d8dfd044f 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/encode_prom.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/encode_prom.go
@@ -23,22 +23,22 @@ type PromTLSConf struct {
 }
 
 type PromEncode struct {
-	*PromConnectionInfo
-	Metrics    PromMetricsItems `yaml:"metrics,omitempty" json:"metrics,omitempty" doc:"list of prometheus metric definitions, each includes:"`
-	Prefix     string           `yaml:"prefix,omitempty" json:"prefix,omitempty" doc:"prefix added to each metric name"`
-	ExpiryTime Duration         `yaml:"expiryTime,omitempty" json:"expiryTime,omitempty" doc:"time duration of no-flow to wait before deleting prometheus data item"`
-	MaxMetrics int              `yaml:"maxMetrics,omitempty" json:"maxMetrics,omitempty" doc:"maximum number of metrics to report (default: unlimited)"`
+	*PromConnectionInfo `json:",inline,omitempty" doc:"Prometheus connection info (optional); includes:"`
+	Metrics             MetricsItems `yaml:"metrics,omitempty" json:"metrics,omitempty" doc:"list of prometheus metric definitions, each includes:"`
+	Prefix              string       `yaml:"prefix,omitempty" json:"prefix,omitempty" doc:"prefix added to each metric name"`
+	ExpiryTime          Duration     `yaml:"expiryTime,omitempty" json:"expiryTime,omitempty" doc:"time duration of no-flow to wait before deleting prometheus data item"`
+	MaxMetrics          int          `yaml:"maxMetrics,omitempty" json:"maxMetrics,omitempty" doc:"maximum number of metrics to report (default: unlimited)"`
 }
 
-type PromEncodeOperationEnum struct {
+type MetricEncodeOperationEnum struct {
 	Gauge        string `yaml:"gauge" json:"gauge" doc:"single numerical value that can arbitrarily go up and down"`
 	Counter      string `yaml:"counter" json:"counter" doc:"monotonically increasing counter whose value can only increase"`
 	Histogram    string `yaml:"histogram" json:"histogram" doc:"counts samples in configurable buckets"`
 	AggHistogram string `yaml:"agg_histogram" json:"agg_histogram" doc:"counts samples in configurable buckets, pre-aggregated via an Aggregate stage"`
 }
 
-func PromEncodeOperationName(operation string) string {
-	return GetEnumName(PromEncodeOperationEnum{}, operation)
+func MetricEncodeOperationName(operation string) string {
+	return GetEnumName(MetricEncodeOperationEnum{}, operation)
 }
 
 type PromConnectionInfo struct {
@@ -47,39 +47,39 @@ type PromConnectionInfo struct {
 	TLS     *PromTLSConf `yaml:"tls,omitempty" json:"tls,omitempty" doc:"TLS configuration for the endpoint"`
 }
 
-type PromMetricsItem struct {
-	Name       string              `yaml:"name" json:"name" doc:"the metric name"`
-	Type       string              `yaml:"type" json:"type" enum:"PromEncodeOperationEnum" doc:"one of the following:"`
-	Filter     PromMetricsFilter   `yaml:"filter,omitempty" json:"filter,omitempty" doc:"an optional criterion to filter entries by. Deprecated: use filters instead."`
-	Filters    []PromMetricsFilter `yaml:"filters" json:"filters" doc:"a list of criteria to filter entries by"`
-	ValueKey   string              `yaml:"valueKey" json:"valueKey" doc:"entry key from which to resolve metric value"`
-	Labels     []string            `yaml:"labels" json:"labels" doc:"labels to be associated with the metric"`
-	Buckets    []float64           `yaml:"buckets" json:"buckets" doc:"histogram buckets"`
-	ValueScale float64             `yaml:"valueScale" json:"valueScale" doc:"scale factor of the value (MetricVal := FlowVal / Scale)"`
+type MetricsItem struct {
+	Name       string          `yaml:"name" json:"name" doc:"the metric name"`
+	Type       string          `yaml:"type" json:"type" enum:"MetricEncodeOperationEnum" doc:"one of the following:"`
+	Filter     MetricsFilter   `yaml:"filter,omitempty" json:"filter,omitempty" doc:"an optional criterion to filter entries by. Deprecated: use filters instead."`
+	Filters    []MetricsFilter `yaml:"filters" json:"filters" doc:"a list of criteria to filter entries by"`
+	ValueKey   string          `yaml:"valueKey" json:"valueKey" doc:"entry key from which to resolve metric value"`
+	Labels     []string        `yaml:"labels" json:"labels" doc:"labels to be associated with the metric"`
+	Buckets    []float64       `yaml:"buckets" json:"buckets" doc:"histogram buckets"`
+	ValueScale float64         `yaml:"valueScale" json:"valueScale" doc:"scale factor of the value (MetricVal := FlowVal / Scale)"`
 }
 
-func (i *PromMetricsItem) GetFilters() []PromMetricsFilter {
+func (i *MetricsItem) GetFilters() []MetricsFilter {
 	if len(i.Filters) == 0 && i.Filter.Key != "" {
-		return []PromMetricsFilter{i.Filter}
+		return []MetricsFilter{i.Filter}
 	}
 	return i.Filters
 }
 
-type PromMetricsItems []PromMetricsItem
+type MetricsItems []MetricsItem
 
-type PromMetricsFilter struct {
+type MetricsFilter struct {
 	Key   string `yaml:"key" json:"key" doc:"the key to match and filter by"`
 	Value string `yaml:"value" json:"value" doc:"the value to match and filter by"`
-	Type  string `yaml:"type" json:"type" enum:"PromEncodeFilterTypeEnum" doc:"the type of filter match: exact (default), presence, absence or regex"`
+	Type  string `yaml:"type" json:"type" enum:"MetricEncodeFilterTypeEnum" doc:"the type of filter match: exact (default), presence, absence or regex"`
 }
 
-type PromEncodeFilterTypeEnum struct {
+type MetricEncodeFilterTypeEnum struct {
 	Exact    string `yaml:"exact" json:"exact" doc:"match exactly the provided filter value"`
 	Presence string `yaml:"presence" json:"presence" doc:"filter key must be present (filter value is ignored)"`
 	Absence  string `yaml:"absence" json:"absence" doc:"filter key must be absent (filter value is ignored)"`
 	Regex    string `yaml:"regex" json:"regex" doc:"match filter value as a regular expression"`
 }
 
-func PromEncodeFilterTypeName(t string) string {
-	return GetEnumName(PromEncodeFilterTypeEnum{}, t)
+func MetricEncodeFilterTypeName(t string) string {
+	return GetEnumName(MetricEncodeFilterTypeEnum{}, t)
 }
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/enum.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/enum.go
index 4b8deab0b452bd998c6a32a3292fb20812e34911..80b94c5a6872ba08e2d97838f915044601c90398 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/enum.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/enum.go
@@ -23,8 +23,8 @@ import (
 )
 
 type enums struct {
-	PromEncodeOperationEnum       PromEncodeOperationEnum
-	PromEncodeFilterTypeEnum      PromEncodeFilterTypeEnum
+	MetricEncodeOperationEnum     MetricEncodeOperationEnum
+	MetricEncodeFilterTypeEnum    MetricEncodeFilterTypeEnum
 	TransformNetworkOperationEnum TransformNetworkOperationEnum
 	TransformFilterOperationEnum  TransformFilterOperationEnum
 	TransformGenericOperationEnum TransformGenericOperationEnum
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/transform_network.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/transform_network.go
index 53f685bd3446efcb0bff2bf5d2d1dac2f102a862..28167bbbdb3d514e7d3ec3e770bf08bd32cd8a2f 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/transform_network.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/transform_network.go
@@ -43,6 +43,7 @@ const (
 	OpAddLocation          = "add_location"
 	OpAddService           = "add_service"
 	OpAddKubernetes        = "add_kubernetes"
+	OpAddKubernetesInfra   = "add_kubernetes_infra"
 	OpReinterpretDirection = "reinterpret_direction"
 	OpAddIPCategory        = "add_ip_category"
 )
@@ -52,6 +53,7 @@ type TransformNetworkOperationEnum struct {
 	AddLocation          string `yaml:"add_location" json:"add_location" doc:"add output location fields from input"`
 	AddService           string `yaml:"add_service" json:"add_service" doc:"add output network service field from input port and parameters protocol field"`
 	AddKubernetes        string `yaml:"add_kubernetes" json:"add_kubernetes" doc:"add output kubernetes fields from input"`
+	AddKubernetesInfra   string `yaml:"add_kubernetes_infra" json:"add_kubernetes_infra" doc:"add output kubernetes isInfra field from input"`
 	ReinterpretDirection string `yaml:"reinterpret_direction" json:"reinterpret_direction" doc:"reinterpret flow direction at the node level (instead of net interface), to ease the deduplication process"`
 	AddIPCategory        string `yaml:"add_ip_category" json:"add_ip_category" doc:"categorize IPs based on known subnets configuration"`
 }
@@ -61,11 +63,23 @@ func TransformNetworkOperationName(operation string) string {
 }
 
 type NetworkTransformRule struct {
-	Input      string `yaml:"input,omitempty" json:"input,omitempty" doc:"entry input field"`
-	Output     string `yaml:"output,omitempty" json:"output,omitempty" doc:"entry output field"`
-	Type       string `yaml:"type,omitempty" json:"type,omitempty" enum:"TransformNetworkOperationEnum" doc:"one of the following:"`
-	Parameters string `yaml:"parameters,omitempty" json:"parameters,omitempty" doc:"parameters specific to type"`
-	Assignee   string `yaml:"assignee,omitempty" json:"assignee,omitempty" doc:"value needs to assign to output field"`
+	Input           string        `yaml:"input,omitempty" json:"input,omitempty" doc:"entry input field"`
+	Output          string        `yaml:"output,omitempty" json:"output,omitempty" doc:"entry output field"`
+	Type            string        `yaml:"type,omitempty" json:"type,omitempty" enum:"TransformNetworkOperationEnum" doc:"one of the following:"`
+	Parameters      string        `yaml:"parameters,omitempty" json:"parameters,omitempty" doc:"parameters specific to type"`
+	Assignee        string        `yaml:"assignee,omitempty" json:"assignee,omitempty" doc:"value to assign to the output field"`
+	KubernetesInfra *K8sInfraRule `yaml:"kubernetes_infra,omitempty" json:"kubernetes_infra,omitempty" doc:"Kubernetes infra rule specific configuration"`
+	Kubernetes      *K8sRule      `yaml:"kubernetes,omitempty" json:"kubernetes,omitempty" doc:"Kubernetes rule specific configuration"`
+}
+
+type K8sInfraRule struct {
+	Inputs      []string `yaml:"inputs,omitempty" json:"inputs,omitempty" doc:"entry input fields"`
+	Output      string   `yaml:"output,omitempty" json:"output,omitempty" doc:"entry output field"`
+	InfraPrefix string   `yaml:"infra_prefixes,omitempty" json:"infra_prefixes,omitempty" doc:"Namespace prefix that will be tagged as infra"`
+}
+
+type K8sRule struct {
+	AddZone bool `yaml:"add_zone,omitempty" json:"add_zone,omitempty" doc:"If true, the rule will add the zone"`
 }
 
 type NetworkTransformDirectionInfo struct {
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/config/config.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/config/config.go
index d2c0242cc1390b44db06d8552082a7a46c8ae011..3d37c3f7204d460f7a37a36422598d428ed3887c 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/config/config.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/config/config.go
@@ -115,10 +115,13 @@ type Extract struct {
 }
 
 type Encode struct {
-	Type  string           `yaml:"type" json:"type"`
-	Prom  *api.PromEncode  `yaml:"prom,omitempty" json:"prom,omitempty"`
-	Kafka *api.EncodeKafka `yaml:"kafka,omitempty" json:"kafka,omitempty"`
-	S3    *api.EncodeS3    `yaml:"s3,omitempty" json:"s3,omitempty"`
+	Type        string                 `yaml:"type" json:"type"`
+	Prom        *api.PromEncode        `yaml:"prom,omitempty" json:"prom,omitempty"`
+	Kafka       *api.EncodeKafka       `yaml:"kafka,omitempty" json:"kafka,omitempty"`
+	S3          *api.EncodeS3          `yaml:"s3,omitempty" json:"s3,omitempty"`
+	OtlpLogs    *api.EncodeOtlpLogs    `yaml:"otlplogs,omitempty" json:"otlplogs,omitempty"`
+	OtlpMetrics *api.EncodeOtlpMetrics `yaml:"otlpmetrics,omitempty" json:"otlpmetrics,omitempty"`
+	OtlpTraces  *api.EncodeOtlpTraces  `yaml:"otlptraces,omitempty" json:"otlptraces,omitempty"`
 }
 
 type Write struct {
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/health.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/health.go
index a252ebc469142102df862d680bb331442b77cdaa..c4510a69500016758c67394240988583c2115edf 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/health.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/health.go
@@ -24,35 +24,28 @@ import (
 
 	"github.com/heptiolabs/healthcheck"
 	"github.com/netobserv/flowlogs-pipeline/pkg/config"
+	"github.com/netobserv/flowlogs-pipeline/pkg/server"
 	log "github.com/sirupsen/logrus"
 )
 
-type Server struct {
-	handler healthcheck.Handler
-	Address string
-}
-
-func (hs *Server) Serve() {
-	for {
-		err := http.ListenAndServe(hs.Address, hs.handler)
-		log.Errorf("http.ListenAndServe error %v", err)
-		time.Sleep(60 * time.Second)
-	}
-}
-
-func NewHealthServer(opts *config.Options, isAlive healthcheck.Check, isReady healthcheck.Check) *Server {
-
+func NewHealthServer(opts *config.Options, isAlive healthcheck.Check, isReady healthcheck.Check) *http.Server {
 	handler := healthcheck.NewHandler()
 	address := net.JoinHostPort(opts.Health.Address, opts.Health.Port)
 	handler.AddLivenessCheck("PipelineCheck", isAlive)
 	handler.AddReadinessCheck("PipelineCheck", isReady)
 
-	server := &Server{
-		handler: handler,
-		Address: address,
-	}
-
-	go server.Serve()
+	server := server.Default(&http.Server{
+		Handler: handler,
+		Addr:    address,
+	})
+
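+	// Serve in the background; if the listener fails, log the error and retry after a minute.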
+	go func() {
+		for {
+			err := server.ListenAndServe()
+			log.Errorf("server.ListenAndServe error %v", err)
+			time.Sleep(60 * time.Second)
+		}
+	}()
 
 	return server
 }
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_prom.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_prom.go
index a33886633e6a77e38fcab22286638e6e559ec4a0..9780e1bea2fc16863b56d8563dd9700d7d077b2b 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_prom.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_prom.go
@@ -37,17 +37,17 @@ const defaultExpiryTime = time.Duration(2 * time.Minute)
 
 type gaugeInfo struct {
 	gauge *prometheus.GaugeVec
-	info  *metricInfo
+	info  *MetricInfo
 }
 
 type counterInfo struct {
 	counter *prometheus.CounterVec
-	info    *metricInfo
+	info    *MetricInfo
 }
 
 type histoInfo struct {
 	histo *prometheus.HistogramVec
-	info  *metricInfo
+	info  *MetricInfo
 }
 
 type EncodeProm struct {
@@ -67,19 +67,19 @@ type EncodeProm struct {
 }
 
 var (
-	metricsProcessed = operational.DefineMetric(
+	MetricsProcessed = operational.DefineMetric(
 		"metrics_processed",
 		"Number of metrics processed",
 		operational.TypeCounter,
 		"stage",
 	)
-	metricsDropped = operational.DefineMetric(
+	MetricsDropped = operational.DefineMetric(
 		"metrics_dropped",
 		"Number of metrics dropped",
 		operational.TypeCounter,
 		"stage",
 	)
-	encodePromErrors = operational.DefineMetric(
+	EncodePromErrors = operational.DefineMetric(
 		"encode_prom_errors",
 		"Total errors during metrics generation",
 		operational.TypeCounter,
@@ -164,7 +164,7 @@ func (e *EncodeProm) Encode(metricRecord config.GenericMap) {
 	}
 }
 
-func (e *EncodeProm) prepareMetric(flow config.GenericMap, info *metricInfo, m *prometheus.MetricVec) (map[string]string, float64) {
+func (e *EncodeProm) prepareMetric(flow config.GenericMap, info *MetricInfo, m *prometheus.MetricVec) (map[string]string, float64) {
 	val := e.extractGenericValue(flow, info)
 	if val == nil {
 		return nil, 0
@@ -178,7 +178,7 @@ func (e *EncodeProm) prepareMetric(flow config.GenericMap, info *metricInfo, m *
 		floatVal = floatVal / info.ValueScale
 	}
 
-	entryLabels, key := e.extractLabelsAndKey(flow, &info.PromMetricsItem)
+	entryLabels, key := ExtractLabelsAndKey(flow, &info.MetricsItem)
 	// Update entry for expiry mechanism (the entry itself is its own cleanup function)
 	_, ok := e.mCache.UpdateCacheEntry(key, func() { m.Delete(entryLabels) })
 	if !ok {
@@ -188,7 +188,7 @@ func (e *EncodeProm) prepareMetric(flow config.GenericMap, info *metricInfo, m *
 	return entryLabels, floatVal
 }
 
-func (e *EncodeProm) prepareAggHisto(flow config.GenericMap, info *metricInfo, m *prometheus.MetricVec) (map[string]string, []float64) {
+func (e *EncodeProm) prepareAggHisto(flow config.GenericMap, info *MetricInfo, m *prometheus.MetricVec) (map[string]string, []float64) {
 	val := e.extractGenericValue(flow, info)
 	if val == nil {
 		return nil, nil
@@ -199,7 +199,7 @@ func (e *EncodeProm) prepareAggHisto(flow config.GenericMap, info *metricInfo, m
 		return nil, nil
 	}
 
-	entryLabels, key := e.extractLabelsAndKey(flow, &info.PromMetricsItem)
+	entryLabels, key := ExtractLabelsAndKey(flow, &info.MetricsItem)
 	// Update entry for expiry mechanism (the entry itself is its own cleanup function)
 	_, ok = e.mCache.UpdateCacheEntry(key, func() { m.Delete(entryLabels) })
 	if !ok {
@@ -209,8 +209,8 @@ func (e *EncodeProm) prepareAggHisto(flow config.GenericMap, info *metricInfo, m
 	return entryLabels, values
 }
 
-func (e *EncodeProm) extractGenericValue(flow config.GenericMap, info *metricInfo) interface{} {
-	for _, pred := range info.filterPredicates {
+func (e *EncodeProm) extractGenericValue(flow config.GenericMap, info *MetricInfo) interface{} {
+	for _, pred := range info.FilterPredicates {
 		if !pred(flow) {
 			return nil
 		}
@@ -227,7 +227,7 @@ func (e *EncodeProm) extractGenericValue(flow config.GenericMap, info *metricInf
 	return val
 }
 
-func (e *EncodeProm) extractLabelsAndKey(flow config.GenericMap, info *api.PromMetricsItem) (map[string]string, string) {
+func ExtractLabelsAndKey(flow config.GenericMap, info *api.MetricsItem) (map[string]string, string) {
 	entryLabels := make(map[string]string, len(info.Labels))
 	key := strings.Builder{}
 	key.WriteString(info.Name)
@@ -295,7 +295,7 @@ func NewEncodeProm(opMetrics *operational.Metrics, params config.StageParam) (En
 		log.Debugf("Labels = %v", labels)
 		mInfo := CreateMetricInfo(mCfg)
 		switch mCfg.Type {
-		case api.PromEncodeOperationName("Counter"):
+		case api.MetricEncodeOperationName("Counter"):
 			counter := prometheus.NewCounterVec(prometheus.CounterOpts{Name: fullMetricName, Help: ""}, labels)
 			err := registerer.Register(counter)
 			if err != nil {
@@ -306,7 +306,7 @@ func NewEncodeProm(opMetrics *operational.Metrics, params config.StageParam) (En
 				counter: counter,
 				info:    mInfo,
 			})
-		case api.PromEncodeOperationName("Gauge"):
+		case api.MetricEncodeOperationName("Gauge"):
 			gauge := prometheus.NewGaugeVec(prometheus.GaugeOpts{Name: fullMetricName, Help: ""}, labels)
 			err := registerer.Register(gauge)
 			if err != nil {
@@ -317,7 +317,7 @@ func NewEncodeProm(opMetrics *operational.Metrics, params config.StageParam) (En
 				gauge: gauge,
 				info:  mInfo,
 			})
-		case api.PromEncodeOperationName("Histogram"):
+		case api.MetricEncodeOperationName("Histogram"):
 			log.Debugf("buckets = %v", mCfg.Buckets)
 			hist := prometheus.NewHistogramVec(prometheus.HistogramOpts{Name: fullMetricName, Help: "", Buckets: mCfg.Buckets}, labels)
 			err := registerer.Register(hist)
@@ -329,7 +329,7 @@ func NewEncodeProm(opMetrics *operational.Metrics, params config.StageParam) (En
 				histo: hist,
 				info:  mInfo,
 			})
-		case api.PromEncodeOperationName("AggHistogram"):
+		case api.MetricEncodeOperationName("AggHistogram"):
 			log.Debugf("buckets = %v", mCfg.Buckets)
 			hist := prometheus.NewHistogramVec(prometheus.HistogramOpts{Name: fullMetricName, Help: "", Buckets: mCfg.Buckets}, labels)
 			err := registerer.Register(hist)
@@ -365,9 +365,9 @@ func NewEncodeProm(opMetrics *operational.Metrics, params config.StageParam) (En
 		mCache:           putils.NewTimedCache(cfg.MaxMetrics, mChacheLenMetric),
 		mChacheLenMetric: mChacheLenMetric,
 		exitChan:         putils.ExitChannel(),
-		metricsProcessed: opMetrics.NewCounter(&metricsProcessed, params.Name),
-		metricsDropped:   opMetrics.NewCounter(&metricsDropped, params.Name),
-		errorsCounter:    opMetrics.NewCounterVec(&encodePromErrors),
+		metricsProcessed: opMetrics.NewCounter(&MetricsProcessed, params.Name),
+		metricsDropped:   opMetrics.NewCounter(&MetricsDropped, params.Name),
+		errorsCounter:    opMetrics.NewCounterVec(&EncodePromErrors),
 	}
 	go w.cleanupExpiredEntriesLoop()
 	return w, nil
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_prom_metric.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_prom_metric.go
index e877dc9a1d20087bd3075bd014996f10ba4bbdd1..15fe7854eed9d229338d7658d68d575f625100c0 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_prom_metric.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_prom_metric.go
@@ -8,28 +8,28 @@ import (
 	"github.com/netobserv/flowlogs-pipeline/pkg/config"
 )
 
-type predicate func(flow config.GenericMap) bool
+type Predicate func(flow config.GenericMap) bool
 
-type metricInfo struct {
-	api.PromMetricsItem
-	filterPredicates []predicate
+type MetricInfo struct {
+	api.MetricsItem
+	FilterPredicates []Predicate
 }
 
-func presence(filter api.PromMetricsFilter) predicate {
+func Presence(filter api.MetricsFilter) Predicate {
 	return func(flow config.GenericMap) bool {
 		_, found := flow[filter.Key]
 		return found
 	}
 }
 
-func absence(filter api.PromMetricsFilter) predicate {
+func Absence(filter api.MetricsFilter) Predicate {
 	return func(flow config.GenericMap) bool {
 		_, found := flow[filter.Key]
 		return !found
 	}
 }
 
-func exact(filter api.PromMetricsFilter) predicate {
+func Exact(filter api.MetricsFilter) Predicate {
 	return func(flow config.GenericMap) bool {
 		if val, found := flow[filter.Key]; found {
 			sVal, ok := val.(string)
@@ -42,7 +42,7 @@ func exact(filter api.PromMetricsFilter) predicate {
 	}
 }
 
-func regex(filter api.PromMetricsFilter) predicate {
+func regex(filter api.MetricsFilter) Predicate {
 	r, _ := regexp.Compile(filter.Value)
 	return func(flow config.GenericMap) bool {
 		if val, found := flow[filter.Key]; found {
@@ -56,27 +56,27 @@ func regex(filter api.PromMetricsFilter) predicate {
 	}
 }
 
-func filterToPredicate(filter api.PromMetricsFilter) predicate {
+func filterToPredicate(filter api.MetricsFilter) Predicate {
 	switch filter.Type {
 	case api.PromFilterExact:
-		return exact(filter)
+		return Exact(filter)
 	case api.PromFilterPresence:
-		return presence(filter)
+		return Presence(filter)
 	case api.PromFilterAbsence:
-		return absence(filter)
+		return Absence(filter)
 	case api.PromFilterRegex:
 		return regex(filter)
 	}
-	// Default = exact
-	return exact(filter)
+	// Default = Exact
+	return Exact(filter)
 }
 
-func CreateMetricInfo(def api.PromMetricsItem) *metricInfo {
-	mi := metricInfo{
-		PromMetricsItem: def,
+func CreateMetricInfo(def api.MetricsItem) *MetricInfo {
+	mi := MetricInfo{
+		MetricsItem: def,
 	}
 	for _, f := range def.GetFilters() {
-		mi.filterPredicates = append(mi.filterPredicates, filterToPredicate(f))
+		mi.FilterPredicates = append(mi.FilterPredicates, filterToPredicate(f))
 	}
 	return &mi
 }
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlplogs.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlplogs.go
new file mode 100644
index 0000000000000000000000000000000000000000..d67a073c858e227ebec9bfc034501c5a059c959b
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlplogs.go
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2023 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package opentelemetry
+
+import (
+	"context"
+
+	sdklog "github.com/agoda-com/opentelemetry-logs-go/sdk/logs"
+	"github.com/netobserv/flowlogs-pipeline/pkg/api"
+	"github.com/netobserv/flowlogs-pipeline/pkg/config"
+	"github.com/netobserv/flowlogs-pipeline/pkg/operational"
+	"github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode"
+	log "github.com/sirupsen/logrus"
+	"go.opentelemetry.io/otel/sdk/resource"
+)
+
+type EncodeOtlpLogs struct {
+	cfg api.EncodeOtlpLogs
+	ctx context.Context
+	res *resource.Resource
+	lp  *sdklog.LoggerProvider
+}
+
+// Encode encodes a log entry to be exported
+func (e *EncodeOtlpLogs) Encode(entry config.GenericMap) {
+	log.Tracef("entering EncodeOtlpLogs. entry = %v", entry)
+	e.LogWrite(entry)
+}
+
+func NewEncodeOtlpLogs(opMetrics *operational.Metrics, params config.StageParam) (encode.Encoder, error) {
+	log.Tracef("entering NewEncodeOtlpLogs \n")
+	cfg := api.EncodeOtlpLogs{}
+	if params.Encode != nil && params.Encode.OtlpLogs != nil {
+		cfg = *params.Encode.OtlpLogs
+	}
+	log.Debugf("NewEncodeOtlpLogs cfg = %v \n", cfg)
+
+	ctx := context.Background()
+	res := newResource()
+
+	lp, err := NewOtlpLoggerProvider(ctx, params, res)
+	if err != nil {
+		return nil, err
+	}
+
+	w := &EncodeOtlpLogs{
+		cfg: cfg,
+		ctx: ctx,
+		res: res,
+		lp:  lp,
+	}
+	return w, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlpmetrics.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlpmetrics.go
new file mode 100644
index 0000000000000000000000000000000000000000..739a7d4e282efcdb67f099d524f1365ba4bb46e5
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlpmetrics.go
@@ -0,0 +1,284 @@
+/*
+ * Copyright (C) 2023 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package opentelemetry
+
+import (
+	"context"
+	"time"
+
+	"github.com/netobserv/flowlogs-pipeline/pkg/api"
+	"github.com/netobserv/flowlogs-pipeline/pkg/config"
+	"github.com/netobserv/flowlogs-pipeline/pkg/operational"
+	"github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode"
+	putils "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils"
+	"github.com/netobserv/flowlogs-pipeline/pkg/utils"
+	"github.com/prometheus/client_golang/prometheus"
+	log "github.com/sirupsen/logrus"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+	"go.opentelemetry.io/otel/sdk/resource"
+)
+
+// TODO: Refactor the code that is common with encode_prom
+const (
+	defaultExpiryTime = time.Duration(2 * time.Minute)
+	flpMeterName      = "flp_meter"
+)
+
+type counterInfo struct {
+	counter *metric.Float64Counter
+	info    *encode.MetricInfo
+}
+
+type gaugeInfo struct {
+	gauge *metric.Float64ObservableGauge
+	info  *encode.MetricInfo
+	obs   Float64Gauge
+}
+
+// TBD: Handle histograms
+/*
+type histoInfo struct {
+	histo *metric.Float64Histogram
+	info  *encode.MetricInfo
+}
+*/
+
+type EncodeOtlpMetrics struct {
+	cfg      api.EncodeOtlpMetrics
+	ctx      context.Context
+	res      *resource.Resource
+	mp       *sdkmetric.MeterProvider
+	counters []counterInfo
+	gauges   []gaugeInfo
+	//histos           []histoInfo
+	//aggHistos        []histoInfo
+	expiryTime       time.Duration
+	mCache           *putils.TimedCache
+	exitChan         <-chan struct{}
+	meter            metric.Meter
+	metricsProcessed prometheus.Counter
+	metricsDropped   prometheus.Counter
+	errorsCounter    *prometheus.CounterVec
+}
+
+// Encode encodes a metric to be exported
+func (e *EncodeOtlpMetrics) Encode(metricRecord config.GenericMap) {
+	log.Tracef("entering EncodeOtlpMetrics. entry = %v", metricRecord)
+
+	// Process counters
+	for _, mInfo := range e.counters {
+		labels, value, _ := e.prepareMetric(metricRecord, mInfo.info)
+		if labels == nil {
+			continue
+		}
+		// set attributes using the labels
+		attributes := obtainAttributesFromLabels(labels)
+		(*mInfo.counter).Add(e.ctx, value, metric.WithAttributes(attributes...))
+		e.metricsProcessed.Inc()
+	}
+
+	// Process gauges
+	for _, mInfo := range e.gauges {
+		labels, value, key := e.prepareMetric(metricRecord, mInfo.info)
+		if labels == nil {
+			continue
+		}
+		// set attributes using the labels
+		attributes := obtainAttributesFromLabels(labels)
+		mInfo.obs.Set(key, value, attributes)
+		e.metricsProcessed.Inc()
+	}
+	// TBD: Process histograms
+}
+
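+// prepareMetric resolves the labels, numeric value and cache key for a flow record;
+// it returns nil labels when the record is filtered out, when value conversion
+// fails, or when the entry is dropped by the metrics cache.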
+func (e *EncodeOtlpMetrics) prepareMetric(flow config.GenericMap, info *encode.MetricInfo) (map[string]string, float64, string) {
+	val := e.extractGenericValue(flow, info)
+	if val == nil {
+		return nil, 0, ""
+	}
+	floatVal, err := utils.ConvertToFloat64(val)
+	if err != nil {
+		e.errorsCounter.WithLabelValues("ValueConversionError", info.Name, info.ValueKey).Inc()
+		return nil, 0, ""
+	}
+
+	entryLabels, key := encode.ExtractLabelsAndKey(flow, &info.MetricsItem)
+	// Update entry for expiry mechanism (the entry itself is its own cleanup function)
+	_, ok := e.mCache.UpdateCacheEntry(key, entryLabels)
+	if !ok {
+		e.metricsDropped.Inc()
+		return nil, 0, ""
+	}
+	return entryLabels, floatVal, key
+}
+
+func (e *EncodeOtlpMetrics) extractGenericValue(flow config.GenericMap, info *encode.MetricInfo) interface{} {
+	for _, pred := range info.FilterPredicates {
+		if !pred(flow) {
+			return nil
+		}
+	}
+	if info.ValueKey == "" {
+		// No value key means it's a records / flows counter (1 flow = 1 increment), so just return 1
+		return 1
+	}
+	val, found := flow[info.ValueKey]
+	if !found {
+		e.errorsCounter.WithLabelValues("RecordKeyMissing", info.Name, info.ValueKey).Inc()
+		return nil
+	}
+	return val
+}
+
+func NewEncodeOtlpMetrics(opMetrics *operational.Metrics, params config.StageParam) (encode.Encoder, error) {
+	log.Tracef("entering NewEncodeOtlpMetrics \n")
+	cfg := api.EncodeOtlpMetrics{}
+	if params.Encode != nil && params.Encode.OtlpMetrics != nil {
+		cfg = *params.Encode.OtlpMetrics
+	}
+	log.Debugf("NewEncodeOtlpMetrics cfg = %v \n", cfg)
+
+	ctx := context.Background()
+	res := newResource()
+
+	mp, err := NewOtlpMetricsProvider(ctx, params, res)
+	if err != nil {
+		return nil, err
+	}
+	meter := mp.Meter(
+		flpMeterName,
+	)
+
+	expiryTime := cfg.ExpiryTime
+	if expiryTime.Duration == 0 {
+		expiryTime.Duration = defaultExpiryTime
+	}
+
+	meterFactory := otel.Meter(flpMeterName)
+	counters := []counterInfo{}
+	gauges := []gaugeInfo{}
+
+	for _, mCfg := range cfg.Metrics {
+		fullMetricName := cfg.Prefix + mCfg.Name
+		labels := mCfg.Labels
+		log.Debugf("fullMetricName = %v", fullMetricName)
+		log.Debugf("Labels = %v", labels)
+		mInfo := encode.CreateMetricInfo(mCfg)
+		switch mCfg.Type {
+		case api.MetricEncodeOperationName("Counter"):
+			counter, err := meter.Float64Counter(fullMetricName)
+			if err != nil {
+				log.Errorf("error during counter creation: %v", err)
+				return nil, err
+			}
+			counters = append(counters, counterInfo{
+				counter: &counter,
+				info:    mInfo,
+			})
+		case api.MetricEncodeOperationName("Gauge"):
+			// at implementation time, only asynchronous gauges are supported by otel in golang
+			obs := Float64Gauge{observations: make(map[string]Float64GaugeEntry)}
+			gauge, err := meterFactory.Float64ObservableGauge(
+				fullMetricName,
+				metric.WithFloat64Callback(obs.Callback),
+			)
+			if err != nil {
+				log.Errorf("error during gauge creation: %v", err)
+				return nil, err
+			}
+			gInfo := gaugeInfo{
+				info:  mInfo,
+				obs:   obs,
+				gauge: &gauge,
+			}
+			gauges = append(gauges, gInfo)
+		// TBD: handle histograms
+		case "default":
+			log.Errorf("invalid metric type = %v, skipping", mCfg.Type)
+			continue
+		}
+	}
+
+	w := &EncodeOtlpMetrics{
+		cfg:              cfg,
+		ctx:              ctx,
+		res:              res,
+		mp:               mp,
+		meter:            meterFactory,
+		counters:         counters,
+		gauges:           gauges,
+		expiryTime:       expiryTime.Duration,
+		mCache:           putils.NewTimedCache(0, nil),
+		exitChan:         putils.ExitChannel(),
+		metricsProcessed: opMetrics.NewCounter(&encode.MetricsProcessed, params.Name),
+		metricsDropped:   opMetrics.NewCounter(&encode.MetricsDropped, params.Name),
+		errorsCounter:    opMetrics.NewCounterVec(&encode.EncodePromErrors),
+	}
+	go w.cleanupExpiredEntriesLoop()
+	return w, nil
+}
+
+// Cleanup - callback function from lru cleanup
+func (e *EncodeOtlpMetrics) Cleanup(cleanupFunc interface{}) {
+	// nothing more to do
+}
+
+func (e *EncodeOtlpMetrics) cleanupExpiredEntriesLoop() {
+	ticker := time.NewTicker(e.expiryTime)
+	for {
+		select {
+		case <-e.exitChan:
+			log.Debugf("exiting cleanupExpiredEntriesLoop because of signal")
+			return
+		case <-ticker.C:
+			e.mCache.CleanupExpiredEntries(e.expiryTime, e.Cleanup)
+		}
+	}
+}
+
+// At present, the Go OTel SDK only supports asynchronous gauges, so the helpers
+// below emulate a synchronous Set() on top of the asynchronous callback mechanism.
+
+type Float64GaugeEntry struct {
+	attributes []attribute.KeyValue
+	value      float64
+}
+
+type Float64Gauge struct {
+	observations map[string]Float64GaugeEntry
+}
+
+// Callback implements the callback function for the underlying asynchronous gauge
+// it observes the current state of all previous Set() calls.
+func (f *Float64Gauge) Callback(ctx context.Context, o metric.Float64Observer) error {
+	for _, fEntry := range f.observations {
+		o.Observe(fEntry.value, metric.WithAttributes(fEntry.attributes...))
+	}
+	// re-initialize the observed items
+	f.observations = make(map[string]Float64GaugeEntry)
+	return nil
+}
+
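+// Set records the latest observation for the given cache key; a repeated Set for
+// the same key before the next collection overwrites the earlier value, and all
+// entries are drained by Callback on each collection cycle.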
+func (f *Float64Gauge) Set(key string, val float64, attrs []attribute.KeyValue) {
+	f.observations[key] = Float64GaugeEntry{
+		value:      val,
+		attributes: attrs,
+	}
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlptrace.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlptrace.go
new file mode 100644
index 0000000000000000000000000000000000000000..e1fe987919f4020b94374139c38475dbb1c2e2cc
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlptrace.go
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2023 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package opentelemetry
+
+import (
+	"context"
+	"strings"
+
+	"github.com/netobserv/flowlogs-pipeline/pkg/api"
+	"github.com/netobserv/flowlogs-pipeline/pkg/config"
+	"github.com/netobserv/flowlogs-pipeline/pkg/operational"
+	"github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode"
+	log "github.com/sirupsen/logrus"
+	"go.opentelemetry.io/otel/sdk/resource"
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+const (
+	flpTracerName     = "flp_tracer"
+	flpEncodeSpanName = "flp_encode"
+)
+
+type EncodeOtlpTrace struct {
+	cfg api.EncodeOtlpTraces
+	ctx context.Context
+	res *resource.Resource
+	tp  *sdktrace.TracerProvider
+}
+
+// Encode encodes a flow entry to be exported as a trace span (with child spans per SpanSplitter prefix)
+func (e *EncodeOtlpTrace) Encode(entry config.GenericMap) {
+	log.Tracef("entering EncodeOtlpTrace. entry = %v", entry)
+	tr := e.tp.Tracer(flpTracerName)
+	ll := len(e.cfg.SpanSplitter)
+
+	// create parent span
+	newCtx, span0 := tr.Start(e.ctx, flpEncodeSpanName)
+	attributes := obtainAttributesFromEntry(entry)
+	span0.SetAttributes(*attributes...)
+	defer span0.End()
+	if ll == 0 {
+		return
+	}
+	// for each item in SpanSplitter, make a separate entry for each listed item
+	// do not include fields that belong exclusively to other items
+	ss := e.cfg.SpanSplitter
+	records := make([]config.GenericMap, ll)
+	keepItem := make([]bool, ll)
+	for i := 0; i < ll; i++ {
+		records[i] = make(config.GenericMap)
+	}
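+	// Distribute fields across the per-prefix records: a field whose name starts
+	// with a splitter prefix goes only into that prefix's record, with the prefix
+	// trimmed (e.g. prefix "Src" stores "SrcAddr" as "Addr"); fields matching no
+	// prefix are copied into every record.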
+OUTER:
+	for key, value := range entry {
+		for i := 0; i < ll; i++ {
+			if strings.HasPrefix(key, ss[i]) {
+				trimmed := strings.TrimPrefix(key, ss[i])
+				records[i][trimmed] = value
+				keepItem[i] = true
+				continue OUTER
+			}
+		}
+		// if we reach here, the field did not have any of the prefixes.
+		// copy it into each of the records
+		for i := 0; i < ll; i++ {
+			records[i][key] = value
+		}
+	}
+	// only create child spans for records that have a field directly related to their item
+	for i := 0; i < ll; i++ {
+		if keepItem[i] {
+			_, span := tr.Start(newCtx, ss[i])
+			attributes := obtainAttributesFromEntry(records[i])
+			span.SetAttributes(*attributes...)
+			span.End()
+		}
+	}
+}
+
+func NewEncodeOtlpTraces(opMetrics *operational.Metrics, params config.StageParam) (encode.Encoder, error) {
+	log.Tracef("entering NewEncodeOtlpTraces \n")
+	cfg := api.EncodeOtlpTraces{}
+	if params.Encode != nil && params.Encode.OtlpTraces != nil {
+		cfg = *params.Encode.OtlpTraces
+	}
+	log.Debugf("NewEncodeOtlpTraces cfg = %v \n", cfg)
+
+	ctx := context.Background()
+	res := newResource()
+
+	tp, err := NewOtlpTracerProvider(ctx, params, res)
+	if err != nil {
+		return nil, err
+	}
+
+	w := &EncodeOtlpTrace{
+		cfg: cfg,
+		ctx: ctx,
+		res: res,
+		tp:  tp,
+	}
+	return w, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/opentelemetry.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/opentelemetry.go
new file mode 100644
index 0000000000000000000000000000000000000000..dae256f3ad5004c4bbd1eb355e2ec9732fb6f138
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/opentelemetry.go
@@ -0,0 +1,327 @@
+/*
+ * Copyright (C) 2023 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package opentelemetry
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"time"
+
+	otel2 "github.com/agoda-com/opentelemetry-logs-go"
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs"
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc"
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp"
+	"github.com/agoda-com/opentelemetry-logs-go/logs"
+	sdklog2 "github.com/agoda-com/opentelemetry-logs-go/sdk/logs"
+	"github.com/netobserv/flowlogs-pipeline/pkg/api"
+	"github.com/netobserv/flowlogs-pipeline/pkg/config"
+	"github.com/netobserv/flowlogs-pipeline/pkg/utils"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+	"go.opentelemetry.io/otel/sdk/resource"
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+	"google.golang.org/grpc/credentials"
+)
+
+// Note:
+// As of the writing of this module, go.opentelemetry.io does not provide interfaces for logs.
+// We therefore temporarily use agoda-com/opentelemetry-logs-go for logs.
+// When go.opentelemetry.io provides interfaces for logs, the code here should be updated to use those interfaces.
+
+const (
+	flpOtlpLoggerName      = "flp-otlp-logger"
+	defaultTimeInterval    = time.Duration(20 * time.Second)
+	flpOtlpResourceVersion = "v0.1.0"
+	flpOtlpResourceName    = "netobserv-otlp"
+	grpcType               = "grpc"
+	httpType               = "http"
+)
+
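+// NewOtlpTracerProvider builds an OTLP trace exporter (gRPC or HTTP, per the stage
+// config), wraps it in a batching TracerProvider and installs it as the global provider.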
+func NewOtlpTracerProvider(ctx context.Context, params config.StageParam, res *resource.Resource) (*sdktrace.TracerProvider, error) {
+	cfg := api.EncodeOtlpTraces{}
+	if params.Encode != nil && params.Encode.OtlpTraces != nil {
+		cfg = *params.Encode.OtlpTraces
+	}
+	if cfg.OtlpConnectionInfo == nil {
+		return nil, fmt.Errorf("otlptraces missing connection info")
+	}
+	addr := fmt.Sprintf("%s:%v", cfg.OtlpConnectionInfo.Address, cfg.OtlpConnectionInfo.Port)
+	var err error
+	var traceProvider *sdktrace.TracerProvider
+	var traceExporter *otlptrace.Exporter
+	if cfg.ConnectionType == grpcType {
+		var expOption otlptracegrpc.Option
+		var tlsOption otlptracegrpc.Option
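+		// connections are insecure by default; replaced below when TLS is configured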
+		tlsOption = otlptracegrpc.WithInsecure()
+		if cfg.TLS != nil {
+			tlsConfig, err := cfg.OtlpConnectionInfo.TLS.Build()
+			if err != nil {
+				return nil, err
+			}
+			tlsOption = otlptracegrpc.WithTLSCredentials(credentials.NewTLS(tlsConfig))
+		}
+		expOption = otlptracegrpc.WithEndpoint(addr)
+		traceExporter, err = otlptracegrpc.New(ctx,
+			expOption,
+			tlsOption,
+			otlptracegrpc.WithHeaders(cfg.Headers))
+		if err != nil {
+			return nil, err
+		}
+	} else if cfg.ConnectionType == httpType {
+		var expOption otlptracehttp.Option
+		var tlsOption otlptracehttp.Option
+		tlsOption = otlptracehttp.WithInsecure()
+		if cfg.TLS != nil {
+			tlsConfig, err := cfg.TLS.Build()
+			if err != nil {
+				return nil, err
+			}
+			tlsOption = otlptracehttp.WithTLSClientConfig(tlsConfig)
+		}
+		expOption = otlptracehttp.WithEndpoint(addr)
+		traceExporter, err = otlptracehttp.New(ctx,
+			expOption,
+			tlsOption,
+			otlptracehttp.WithHeaders(cfg.Headers))
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		return nil, fmt.Errorf("must specify grpcaddress or httpaddress")
+	}
+	traceProvider = sdktrace.NewTracerProvider(
+		sdktrace.WithResource(res),
+		sdktrace.WithSpanProcessor(sdktrace.NewBatchSpanProcessor(traceExporter)),
+	)
+
+	otel.SetTracerProvider(traceProvider)
+	return traceProvider, nil
+}
+
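+// NewOtlpMetricsProvider builds an OTLP metrics exporter (gRPC or HTTP) behind a
+// periodic reader that pushes at the configured interval (20s by default) and
+// installs it as the global meter provider.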
+func NewOtlpMetricsProvider(ctx context.Context, params config.StageParam, res *resource.Resource) (*sdkmetric.MeterProvider, error) {
+	cfg := api.EncodeOtlpMetrics{}
+	if params.Encode != nil && params.Encode.OtlpMetrics != nil {
+		cfg = *params.Encode.OtlpMetrics
+	}
+	timeInterval := cfg.PushTimeInterval
+	if timeInterval.Duration == 0 {
+		timeInterval.Duration = defaultTimeInterval
+	}
+	if cfg.OtlpConnectionInfo == nil {
+		return nil, fmt.Errorf("otlpmetrics missing connection info")
+	}
+	addr := fmt.Sprintf("%s:%v", cfg.OtlpConnectionInfo.Address, cfg.OtlpConnectionInfo.Port)
+	var err error
+	var meterProvider *sdkmetric.MeterProvider
+	if cfg.ConnectionType == grpcType {
+		var metricExporter *otlpmetricgrpc.Exporter
+		var expOption otlpmetricgrpc.Option
+		var tlsOption otlpmetricgrpc.Option
+		tlsOption = otlpmetricgrpc.WithInsecure()
+		if cfg.TLS != nil {
+			tlsConfig, err := cfg.TLS.Build()
+			if err != nil {
+				return nil, err
+			}
+			tlsOption = otlpmetricgrpc.WithTLSCredentials(credentials.NewTLS(tlsConfig))
+		}
+		expOption = otlpmetricgrpc.WithEndpoint(addr)
+		metricExporter, err = otlpmetricgrpc.New(ctx, expOption, tlsOption)
+		if err != nil {
+			return nil, err
+		}
+		meterProvider = sdkmetric.NewMeterProvider(
+			sdkmetric.WithResource(res),
+			sdkmetric.WithReader(sdkmetric.NewPeriodicReader(metricExporter,
+				sdkmetric.WithInterval(timeInterval.Duration))),
+		)
+	} else if cfg.ConnectionType == httpType {
+		var metricExporter *otlpmetrichttp.Exporter
+		var expOption otlpmetrichttp.Option
+		var tlsOption otlpmetrichttp.Option
+		tlsOption = otlpmetrichttp.WithInsecure()
+		if cfg.TLS != nil {
+			tlsConfig, err := cfg.TLS.Build()
+			if err != nil {
+				return nil, err
+			}
+			tlsOption = otlpmetrichttp.WithTLSClientConfig(tlsConfig)
+		}
+		expOption = otlpmetrichttp.WithEndpoint(addr)
+		metricExporter, err = otlpmetrichttp.New(ctx, expOption, tlsOption)
+		if err != nil {
+			return nil, err
+		}
+		meterProvider = sdkmetric.NewMeterProvider(
+			sdkmetric.WithResource(res),
+			sdkmetric.WithReader(sdkmetric.NewPeriodicReader(metricExporter,
+				sdkmetric.WithInterval(timeInterval.Duration))),
+		)
+	} else {
+		return nil, fmt.Errorf("must specify grpcaddress or httpaddress")
+	}
+
+	otel.SetMeterProvider(meterProvider)
+	return meterProvider, nil
+}
+
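+// NewOtlpLoggerProvider builds an OTLP log exporter using the agoda-com logs SDK
+// (see the note above), wraps it in a batching LoggerProvider and registers it globally.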
+func NewOtlpLoggerProvider(ctx context.Context, params config.StageParam, res *resource.Resource) (*sdklog2.LoggerProvider, error) {
+	cfg := api.EncodeOtlpLogs{}
+	if params.Encode != nil && params.Encode.OtlpLogs != nil {
+		cfg = *params.Encode.OtlpLogs
+	}
+	if cfg.OtlpConnectionInfo == nil {
+		return nil, fmt.Errorf("otlplogs missing connection info")
+	}
+	addr := fmt.Sprintf("%s:%v", cfg.OtlpConnectionInfo.Address, cfg.OtlpConnectionInfo.Port)
+	var expOption otlplogs.ExporterOption
+	if cfg.ConnectionType == grpcType {
+		var tlsOption otlplogsgrpc.Option
+		tlsOption = otlplogsgrpc.WithInsecure()
+		if cfg.TLS != nil {
+			tlsConfig, err := cfg.TLS.Build()
+			if err != nil {
+				return nil, err
+			}
+			tlsOption = otlplogsgrpc.WithTLSCredentials(credentials.NewTLS(tlsConfig))
+		}
+		expOption = otlplogs.WithClient(otlplogsgrpc.NewClient(
+			otlplogsgrpc.WithEndpoint(addr),
+			tlsOption,
+			otlplogsgrpc.WithHeaders(cfg.Headers),
+		))
+	} else if cfg.ConnectionType == httpType {
+		var tlsOption otlplogshttp.Option
+		tlsOption = otlplogshttp.WithInsecure()
+		if cfg.TLS != nil {
+			tlsConfig, err := cfg.TLS.Build()
+			if err != nil {
+				return nil, err
+			}
+			tlsOption = otlplogshttp.WithTLSClientConfig(tlsConfig)
+		}
+		expOption = otlplogs.WithClient(otlplogshttp.NewClient(
+			otlplogshttp.WithEndpoint(addr),
+			tlsOption,
+			otlplogshttp.WithHeaders(cfg.Headers),
+		))
+	} else {
+		return nil, fmt.Errorf("must specify grpcaddress or httpaddress")
+	}
+	logExporter, err := otlplogs.NewExporter(ctx, expOption)
+	if err != nil {
+		return nil, err
+	}
+
+	loggerProvider := sdklog2.NewLoggerProvider(
+		sdklog2.WithBatcher(logExporter),
+		sdklog2.WithResource(res),
+	)
+	otel2.SetLoggerProvider(loggerProvider)
+	return loggerProvider, nil
+}
+
+func (e *EncodeOtlpLogs) LogWrite(entry config.GenericMap) {
+	now := time.Now()
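+	// severity is fixed at INFO for all exported flow logs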
+	sn := logs.INFO
+	st := "INFO"
+	msgByteArray, _ := json.Marshal(entry)
+	msg := string(msgByteArray)
+	// TODO: Decide whether the content should be delivered as Body or as Attributes
+	lrc := logs.LogRecordConfig{
+		//Timestamp:         &now, // take timestamp from entry, if present?
+		ObservedTimestamp: now,
+		SeverityNumber:    &sn,
+		SeverityText:      &st,
+		Resource:          e.res,
+		Body:              &msg,
+		Attributes:        obtainAttributesFromEntry(entry),
+	}
+	logRecord := logs.NewLogRecord(lrc)
+
+	logger := otel2.GetLoggerProvider().Logger(
+		flpOtlpLoggerName,
+		logs.WithSchemaURL(semconv.SchemaURL),
+	)
+	logger.Emit(logRecord)
+}
+
+func obtainAttributesFromEntry(entry config.GenericMap) *[]attribute.KeyValue {
+	// convert the entry fields to Attributes of the message
+	var att = make([]attribute.KeyValue, len(entry))
+	index := 0
+	for k, v := range entry {
+		switch v.(type) {
+		case string:
+			valString := v
+			att[index] = attribute.String(k, valString.(string))
+		case int, int32, int64, int16, uint, uint8, uint16, uint32, uint64:
+			valInt, _ := utils.ConvertToInt64(v)
+			att[index] = attribute.Int64(k, valInt)
+		case float32, float64:
+			valFloat, _ := utils.ConvertToFloat64(v)
+			att[index] = attribute.Float64(k, valFloat)
+		case bool:
+			valBool := v
+			att[index] = attribute.Bool(k, valBool.(bool))
+		case nil:
+			// skip this field
+			continue
+		}
+		index++
+	}
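+	// trim the slice to the filled portion; nil-valued fields were skipped above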
+	adjustedAtt := att[0:index]
+	return &adjustedAtt
+}
+
+func obtainAttributesFromLabels(labels map[string]string) []attribute.KeyValue {
+	// convert the entry fields to Attributes of the message
+	var att = make([]attribute.KeyValue, len(labels))
+	index := 0
+	for k, v := range labels {
+		att[index] = attribute.String(k, v)
+		index++
+	}
+	return att
+}
+
+func (e *EncodeOtlpMetrics) MetricWrite(entry config.GenericMap) {
+	// nothing more to do at present
+}
+
+// newResource returns a resource describing this application.
+func newResource() *resource.Resource {
+	r, _ := resource.Merge(
+		resource.Default(),
+		resource.NewWithAttributes(
+			semconv.SchemaURL,
+			semconv.ServiceName(flpOtlpResourceName),
+			semconv.ServiceVersion(flpOtlpResourceVersion),
+		),
+	)
+	return r
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline_builder.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline_builder.go
index a3141a6f287abbfc1ce15cb4401de89cb335c1f2..769a2a396444765ab46f040f51978a67fbff8310 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline_builder.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline_builder.go
@@ -10,6 +10,7 @@ import (
 	"github.com/netobserv/flowlogs-pipeline/pkg/config"
 	"github.com/netobserv/flowlogs-pipeline/pkg/operational"
 	"github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode"
+	"github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry"
 	"github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract"
 	"github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack"
 	"github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest"
@@ -429,6 +430,12 @@ func getEncoder(opMetrics *operational.Metrics, params config.StageParam) (encod
 		encoder, err = encode.NewEncodeKafka(opMetrics, params)
 	case api.S3Type:
 		encoder, err = encode.NewEncodeS3(opMetrics, params)
+	case api.OtlpLogsType:
+		encoder, err = opentelemetry.NewEncodeOtlpLogs(opMetrics, params)
+	case api.OtlpMetricsType:
+		encoder, err = opentelemetry.NewEncodeOtlpMetrics(opMetrics, params)
+	case api.OtlpTracesType:
+		encoder, err = opentelemetry.NewEncodeOtlpTraces(opMetrics, params)
 	case api.NoneType:
 		encoder, _ = encode.NewEncodeNone()
 	default:
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/enrich.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/enrich.go
new file mode 100644
index 0000000000000000000000000000000000000000..7e220c46217c99fabb28dd7964726b79825177b0
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/enrich.go
@@ -0,0 +1,161 @@
+package kubernetes
+
+import (
+	"fmt"
+
+	"github.com/netobserv/flowlogs-pipeline/pkg/api"
+	"github.com/netobserv/flowlogs-pipeline/pkg/config"
+	inf "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers"
+	"github.com/sirupsen/logrus"
+)
+
+var informers inf.InformersInterface = &inf.Informers{}
+
+// For testing
+func MockInformers() {
+	informers = inf.NewInformersMock()
+}
+
+func InitFromConfig(kubeConfigPath string) error {
+	return informers.InitFromConfig(kubeConfigPath)
+}
+
+func Enrich(outputEntry config.GenericMap, rule api.NetworkTransformRule) {
+	kubeInfo, err := informers.GetInfo(fmt.Sprintf("%s", outputEntry[rule.Input]))
+	if err != nil {
+		logrus.WithError(err).Tracef("can't find kubernetes info for IP %v", outputEntry[rule.Input])
+		return
+	}
+	if rule.Assignee != "otel" {
+		// NETOBSERV-666: avoid putting empty namespaces or Loki aggregation queries will
+		// differentiate between empty and nil namespaces.
+		if kubeInfo.Namespace != "" {
+			outputEntry[rule.Output+"_Namespace"] = kubeInfo.Namespace
+		}
+		outputEntry[rule.Output+"_Name"] = kubeInfo.Name
+		outputEntry[rule.Output+"_Type"] = kubeInfo.Type
+		outputEntry[rule.Output+"_OwnerName"] = kubeInfo.Owner.Name
+		outputEntry[rule.Output+"_OwnerType"] = kubeInfo.Owner.Type
+		if rule.Parameters != "" {
+			for labelKey, labelValue := range kubeInfo.Labels {
+				outputEntry[rule.Parameters+"_"+labelKey] = labelValue
+			}
+		}
+		if kubeInfo.HostIP != "" {
+			outputEntry[rule.Output+"_HostIP"] = kubeInfo.HostIP
+			if kubeInfo.HostName != "" {
+				outputEntry[rule.Output+"_HostName"] = kubeInfo.HostName
+			}
+		}
+		fillInK8sZone(outputEntry, rule, *kubeInfo, "_Zone")
+	} else {
+		// NOTE: Some of these fields are taken from opentelemetry specs.
+		// See https://opentelemetry.io/docs/specs/semconv/resource/k8s/
+		// Other fields (not specified in the specs) are named similarly
+		if kubeInfo.Namespace != "" {
+			outputEntry[rule.Output+"k8s.namespace.name"] = kubeInfo.Namespace
+		}
+		switch kubeInfo.Type {
+		case inf.TypeNode:
+			outputEntry[rule.Output+"k8s.node.name"] = kubeInfo.Name
+			outputEntry[rule.Output+"k8s.node.uid"] = kubeInfo.UID
+		case inf.TypePod:
+			outputEntry[rule.Output+"k8s.pod.name"] = kubeInfo.Name
+			outputEntry[rule.Output+"k8s.pod.uid"] = kubeInfo.UID
+		case inf.TypeService:
+			outputEntry[rule.Output+"k8s.service.name"] = kubeInfo.Name
+			outputEntry[rule.Output+"k8s.service.uid"] = kubeInfo.UID
+		}
+		outputEntry[rule.Output+"k8s.name"] = kubeInfo.Name
+		outputEntry[rule.Output+"k8s.type"] = kubeInfo.Type
+		outputEntry[rule.Output+"k8s.owner.name"] = kubeInfo.Owner.Name
+		outputEntry[rule.Output+"k8s.owner.type"] = kubeInfo.Owner.Type
+		if rule.Parameters != "" {
+			for labelKey, labelValue := range kubeInfo.Labels {
+				outputEntry[rule.Parameters+"."+labelKey] = labelValue
+			}
+		}
+		if kubeInfo.HostIP != "" {
+			outputEntry[rule.Output+"k8s.host.ip"] = kubeInfo.HostIP
+			if kubeInfo.HostName != "" {
+				outputEntry[rule.Output+"k8s.host.name"] = kubeInfo.HostName
+			}
+		}
+		fillInK8sZone(outputEntry, rule, *kubeInfo, "k8s.zone")
+	}
+}
+
+const nodeZoneLabelName = "topology.kubernetes.io/zone"
+
+func fillInK8sZone(outputEntry config.GenericMap, rule api.NetworkTransformRule, kubeInfo inf.Info, zonePrefix string) {
+	if rule.Kubernetes == nil || !rule.Kubernetes.AddZone {
+		// Nothing to do
+		return
+	}
+	switch kubeInfo.Type {
+	case inf.TypeNode:
+		zone, ok := kubeInfo.Labels[nodeZoneLabelName]
+		if ok {
+			outputEntry[rule.Output+zonePrefix] = zone
+		}
+		return
+	case inf.TypePod:
+		nodeInfo, err := informers.GetNodeInfo(kubeInfo.HostName)
+		if err != nil {
+			logrus.WithError(err).Tracef("can't find nodes info for node %v", kubeInfo.HostName)
+			return
+		}
+		if nodeInfo != nil {
+			zone, ok := nodeInfo.Labels[nodeZoneLabelName]
+			if ok {
+				outputEntry[rule.Output+zonePrefix] = zone
+			}
+		}
+		return
+
+	case inf.TypeService:
+		// A service is not assigned to a dedicated zone; skipping
+		return
+	}
+}
+
+func EnrichLayer(outputEntry config.GenericMap, rule api.NetworkTransformRule) {
+	if rule.KubernetesInfra == nil {
+		logrus.Error("transformation rule: Missing Kubernetes Infra configuration ")
+		return
+	}
+	outputEntry[rule.KubernetesInfra.Output] = "infra"
+	for _, input := range rule.KubernetesInfra.Inputs {
+		if objectIsApp(fmt.Sprintf("%s", outputEntry[input]), rule.KubernetesInfra.InfraPrefix) {
+			outputEntry[rule.KubernetesInfra.Output] = "app"
+			return
+		}
+	}
+}
+
+const openshiftNamespacePrefix = "openshift-"
+const openshiftPrefixLen = len(openshiftNamespacePrefix)
+
+func objectIsApp(addr string, additionalInfraPrefix string) bool {
+	obj, err := informers.GetInfo(addr)
+	if err != nil {
+		logrus.WithError(err).Tracef("can't find kubernetes info for IP %s", addr)
+		return false
+	}
+	nsLen := len(obj.Namespace)
+	additionalPrefixLen := len(additionalInfraPrefix)
+	if nsLen == 0 {
+		return false
+	}
+	if nsLen >= openshiftPrefixLen && obj.Namespace[:openshiftPrefixLen] == openshiftNamespacePrefix {
+		return false
+	}
+	if additionalPrefixLen > 0 && nsLen >= additionalPrefixLen && obj.Namespace[:additionalPrefixLen] == additionalInfraPrefix {
+		return false
+	}
+	// Special case for the kubernetes and openshift services in the default namespace
+	if obj.Namespace == "default" && (obj.Name == "kubernetes" || obj.Name == "openshift") {
+		return false
+	}
+	return true
+}
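A minimal, self-contained sketch (hypothetical, not part of the diff) of the two naming schemes the new Enrich function emits: underscore-suffixed keys for the default assignee, and OpenTelemetry-style dotted keys for the "otel" assignee. The prefix and values below are illustrative.

```go
package main

import "fmt"

type info struct {
	Namespace, Name, Type string
}

func enrich(out map[string]interface{}, prefix string, otel bool, i info) {
	if otel {
		// OTel-style keys, per https://opentelemetry.io/docs/specs/semconv/resource/k8s/
		if i.Namespace != "" {
			out[prefix+"k8s.namespace.name"] = i.Namespace
		}
		out[prefix+"k8s.name"] = i.Name
		out[prefix+"k8s.type"] = i.Type
		return
	}
	// Legacy underscore-suffixed keys; empty namespaces are skipped (NETOBSERV-666).
	if i.Namespace != "" {
		out[prefix+"_Namespace"] = i.Namespace
	}
	out[prefix+"_Name"] = i.Name
	out[prefix+"_Type"] = i.Type
}

func main() {
	out := map[string]interface{}{}
	enrich(out, "source.", true, info{Namespace: "demo", Name: "pod-1", Type: "Pod"})
	fmt.Println(out) // map[source.k8s.name:pod-1 source.k8s.namespace.name:demo source.k8s.type:Pod]
}
```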
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/kubernetes-mock.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers-mock.go
similarity index 72%
rename from vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/kubernetes-mock.go
rename to vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers-mock.go
index 4d72405ced1ab8e813c7225bfa24d5b6f9cc7a11..9d6307308041695c5d87a5c39922524b792b1b72 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/kubernetes-mock.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers-mock.go
@@ -1,17 +1,25 @@
-package kubernetes
+package informers
 
 import (
+	"errors"
+
 	"github.com/stretchr/testify/mock"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/tools/cache"
 )
 
-type KubeDataMock struct {
+type InformersMock struct {
 	mock.Mock
-	kubeDataInterface
+	InformersInterface
+}
+
+func NewInformersMock() *InformersMock {
+	inf := new(InformersMock)
+	inf.On("InitFromConfig", mock.Anything).Return(nil)
+	return inf
 }
 
-func (o *KubeDataMock) InitFromConfig(kubeConfigPath string) error {
+func (o *InformersMock) InitFromConfig(kubeConfigPath string) error {
 	args := o.Called(kubeConfigPath)
 	return args.Error(0)
 }
@@ -94,7 +102,7 @@ func (m *IndexerMock) FallbackNotFound() {
 	m.On("ByIndex", IndexIP, mock.Anything).Return([]interface{}{}, nil)
 }
 
-func SetupIndexerMocks(kd *KubeData) (pods, nodes, svc, rs *IndexerMock) {
+func SetupIndexerMocks(kd *Informers) (pods, nodes, svc, rs *IndexerMock) {
 	// pods informer
 	pods = &IndexerMock{}
 	pim := InformerMock{}
@@ -117,3 +125,36 @@ func SetupIndexerMocks(kd *KubeData) (pods, nodes, svc, rs *IndexerMock) {
 	kd.replicaSets = &rim
 	return
 }
+
+type FakeInformers struct {
+	InformersInterface
+	info  map[string]*Info
+	nodes map[string]*Info
+}
+
+func SetupStubs(info map[string]*Info, nodes map[string]*Info) *FakeInformers {
+	return &FakeInformers{
+		info:  info,
+		nodes: nodes,
+	}
+}
+
+func (f *FakeInformers) InitFromConfig(_ string) error {
+	return nil
+}
+
+func (f *FakeInformers) GetInfo(n string) (*Info, error) {
+	i := f.info[n]
+	if i != nil {
+		return i, nil
+	}
+	return nil, errors.New("notFound")
+}
+
+func (f *FakeInformers) GetNodeInfo(n string) (*Info, error) {
+	i := f.nodes[n]
+	if i != nil {
+		return i, nil
+	}
+	return nil, errors.New("notFound")
+}
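A short test sketch showing how the new FakeInformers stub could be wired in, assuming the vendored import path introduced by this rename; the IP and node names are illustrative.

```go
package kubernetes_test

import (
	"testing"

	inf "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers"
)

func TestGetInfoStub(t *testing.T) {
	stubs := inf.SetupStubs(
		map[string]*inf.Info{"10.0.0.1": {Type: inf.TypePod}}, // keyed by IP
		map[string]*inf.Info{"node-a": {Type: inf.TypeNode}},  // keyed by node name
	)
	if _, err := stubs.GetInfo("10.0.0.1"); err != nil {
		t.Fatalf("expected stubbed pod info, got error: %v", err)
	}
	if _, err := stubs.GetInfo("10.0.0.2"); err == nil {
		t.Fatal("expected notFound error for an unknown IP")
	}
}
```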
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/kubernetes.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers.go
similarity index 86%
rename from vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/kubernetes.go
rename to vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers.go
index 87bc83f8fbe627c11f1e83f54fc554ab2c6e4e88..344db71d1772dfa4951d6553fa1903091e3f184b 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/kubernetes.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers.go
@@ -15,7 +15,7 @@
  *
  */
 
-package kubernetes
+package informers
 
 import (
 	"fmt"
@@ -25,12 +25,12 @@ import (
 	"time"
 
 	"github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni"
+	"github.com/sirupsen/logrus"
 
-	log "github.com/sirupsen/logrus"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/client-go/informers"
+	inf "k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/metadata"
 	"k8s.io/client-go/metadata/metadatainformer"
@@ -39,23 +39,25 @@ import (
 	"k8s.io/client-go/tools/clientcmd"
 )
 
-var Data kubeDataInterface = &KubeData{}
-
 const (
 	kubeConfigEnvVariable = "KUBECONFIG"
 	syncTime              = 10 * time.Minute
 	IndexIP               = "byIP"
-	typeNode              = "Node"
-	typePod               = "Pod"
-	typeService           = "Service"
+	TypeNode              = "Node"
+	TypePod               = "Pod"
+	TypeService           = "Service"
 )
 
-type kubeDataInterface interface {
+var log = logrus.WithField("component", "transform.Network.Kubernetes")
+
+type InformersInterface interface {
 	GetInfo(string) (*Info, error)
+	GetNodeInfo(string) (*Info, error)
 	InitFromConfig(string) error
 }
 
-type KubeData struct {
+type Informers struct {
+	InformersInterface
 	// pods, nodes and services cache the different object types as *Info pointers
 	pods     cache.SharedIndexInformer
 	nodes    cache.SharedIndexInformer
@@ -92,7 +94,7 @@ var commonIndexers = map[string]cache.IndexFunc{
 	},
 }
 
-func (k *KubeData) GetInfo(ip string) (*Info, error) {
+func (k *Informers) GetInfo(ip string) (*Info, error) {
 	if info, ok := k.fetchInformers(ip); ok {
 		// Owner data might be discovered after the owned, so we fetch it
 		// at the last moment
@@ -105,7 +107,7 @@ func (k *KubeData) GetInfo(ip string) (*Info, error) {
 	return nil, fmt.Errorf("informers can't find IP %s", ip)
 }
 
-func (k *KubeData) fetchInformers(ip string) (*Info, bool) {
+func (k *Informers) fetchInformers(ip string) (*Info, bool) {
 	if info, ok := infoForIP(k.pods.GetIndexer(), ip); ok {
 		// it might happen that the Host is discovered after the Pod
 		if info.HostName == "" {
@@ -134,7 +136,17 @@ func infoForIP(idx cache.Indexer, ip string) (*Info, bool) {
 	return objs[0].(*Info), true
 }
 
-func (k *KubeData) getOwner(info *Info) Owner {
+func (k *Informers) GetNodeInfo(name string) (*Info, error) {
+	item, ok, err := k.nodes.GetIndexer().GetByKey(name)
+	if err != nil {
+		return nil, err
+	} else if ok {
+		return item.(*Info), nil
+	}
+	return nil, nil
+}
+
+func (k *Informers) getOwner(info *Info) Owner {
 	if len(info.OwnerReferences) != 0 {
 		ownerReference := info.OwnerReferences[0]
 		if ownerReference.Kind != "ReplicaSet" {
@@ -165,7 +177,7 @@ func (k *KubeData) getOwner(info *Info) Owner {
 	}
 }
 
-func (k *KubeData) getHostName(hostIP string) string {
+func (k *Informers) getHostName(hostIP string) string {
 	if hostIP != "" {
 		if info, ok := infoForIP(k.nodes.GetIndexer(), hostIP); ok {
 			return info.Name
@@ -174,7 +186,7 @@ func (k *KubeData) getHostName(hostIP string) string {
 	return ""
 }
 
-func (k *KubeData) initNodeInformer(informerFactory informers.SharedInformerFactory) error {
+func (k *Informers) initNodeInformer(informerFactory inf.SharedInformerFactory) error {
 	nodes := informerFactory.Core().V1().Nodes().Informer()
 	// Transform any *v1.Node instance into a *Info instance to save space
 	// in the informer's cache
@@ -204,7 +216,7 @@ func (k *KubeData) initNodeInformer(informerFactory informers.SharedInformerFact
 				Labels:    node.Labels,
 			},
 			ips:  ips,
-			Type: typeNode,
+			Type: TypeNode,
 			// We duplicate HostIP and HostName information to simplify later filtering e.g. by
 			// Host IP, where we want to get all the Pod flows by src/dst host, but also the actual
 			// host-to-host flows by the same field.
@@ -221,7 +233,7 @@ func (k *KubeData) initNodeInformer(informerFactory informers.SharedInformerFact
 	return nil
 }
 
-func (k *KubeData) initPodInformer(informerFactory informers.SharedInformerFactory) error {
+func (k *Informers) initPodInformer(informerFactory inf.SharedInformerFactory) error {
 	pods := informerFactory.Core().V1().Pods().Informer()
 	// Transform any *v1.Pod instance into a *Info instance to save space
 	// in the informer's cache
@@ -244,9 +256,10 @@ func (k *KubeData) initPodInformer(informerFactory informers.SharedInformerFacto
 				Labels:          pod.Labels,
 				OwnerReferences: pod.OwnerReferences,
 			},
-			Type:   typePod,
-			HostIP: pod.Status.HostIP,
-			ips:    ips,
+			Type:     TypePod,
+			HostIP:   pod.Status.HostIP,
+			HostName: pod.Spec.NodeName,
+			ips:      ips,
 		}, nil
 	}); err != nil {
 		return fmt.Errorf("can't set pods transform: %w", err)
@@ -259,7 +272,7 @@ func (k *KubeData) initPodInformer(informerFactory informers.SharedInformerFacto
 	return nil
 }
 
-func (k *KubeData) initServiceInformer(informerFactory informers.SharedInformerFactory) error {
+func (k *Informers) initServiceInformer(informerFactory inf.SharedInformerFactory) error {
 	services := informerFactory.Core().V1().Services().Informer()
 	// Transform any *v1.Service instance into a *Info instance to save space
 	// in the informer's cache
@@ -281,7 +294,7 @@ func (k *KubeData) initServiceInformer(informerFactory informers.SharedInformerF
 				Namespace: svc.Namespace,
 				Labels:    svc.Labels,
 			},
-			Type: typeService,
+			Type: TypeService,
 			ips:  ips,
 		}, nil
 	}); err != nil {
@@ -295,7 +308,7 @@ func (k *KubeData) initServiceInformer(informerFactory informers.SharedInformerF
 	return nil
 }
 
-func (k *KubeData) initReplicaSetInformer(informerFactory metadatainformer.SharedInformerFactory) error {
+func (k *Informers) initReplicaSetInformer(informerFactory metadatainformer.SharedInformerFactory) error {
 	k.replicaSets = informerFactory.ForResource(
 		schema.GroupVersionResource{
 			Group:    "apps",
@@ -320,12 +333,12 @@ func (k *KubeData) initReplicaSetInformer(informerFactory metadatainformer.Share
 	return nil
 }
 
-func (k *KubeData) InitFromConfig(kubeConfigPath string) error {
+func (k *Informers) InitFromConfig(kubeConfigPath string) error {
 	// Initialization variables
 	k.stopChan = make(chan struct{})
 	k.mdStopChan = make(chan struct{})
 
-	config, err := LoadConfig(kubeConfigPath)
+	config, err := loadConfig(kubeConfigPath)
 	if err != nil {
 		return err
 	}
@@ -348,7 +361,7 @@ func (k *KubeData) InitFromConfig(kubeConfigPath string) error {
 	return nil
 }
 
-func LoadConfig(kubeConfigPath string) (*rest.Config, error) {
+func loadConfig(kubeConfigPath string) (*rest.Config, error) {
 	// if no config path is provided, load it from the env variable
 	if kubeConfigPath == "" {
 		kubeConfigPath = os.Getenv(kubeConfigEnvVariable)
@@ -375,8 +388,8 @@ func LoadConfig(kubeConfigPath string) (*rest.Config, error) {
 	return config, nil
 }
 
-func (k *KubeData) initInformers(client kubernetes.Interface, metaClient metadata.Interface) error {
-	informerFactory := informers.NewSharedInformerFactory(client, syncTime)
+func (k *Informers) initInformers(client kubernetes.Interface, metaClient metadata.Interface) error {
+	informerFactory := inf.NewSharedInformerFactory(client, syncTime)
 	metadataInformerFactory := metadatainformer.NewSharedInformerFactory(metaClient, syncTime)
 	err := k.initNodeInformer(informerFactory)
 	if err != nil {
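A standalone sketch of the cache-shrinking pattern used by the informer setup above: SetTransform replaces each *v1.Node with a smaller struct before it enters the informer's cache. Client construction here is illustrative (in-cluster config assumed), and the real code additionally indexes the cached entries by IP.

```go
package main

import (
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	inf "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// nodeInfo keeps only the metadata the pipeline needs.
type nodeInfo struct {
	metav1.ObjectMeta
	Type string
}

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	factory := inf.NewSharedInformerFactory(client, 10*time.Minute)
	nodes := factory.Core().V1().Nodes().Informer()
	// Shrink each cached object before it is stored.
	_ = nodes.SetTransform(func(obj interface{}) (interface{}, error) {
		node := obj.(*v1.Node)
		return &nodeInfo{
			ObjectMeta: metav1.ObjectMeta{Name: node.Name, Labels: node.Labels},
			Type:       "Node",
		}, nil
	})
}
```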
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/location/location.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/location/location.go
index 112517bb8a3c89d5f7d18df956d883b0fd7a4047..c0bfdc5a633f4be567cb074078b0aebc1b58e50d 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/location/location.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/location/location.go
@@ -19,7 +19,6 @@ package location
 
 import (
 	"archive/zip"
-	"crypto/tls"
 	"errors"
 	"fmt"
 	"io"
@@ -28,6 +27,7 @@ import (
 	"path/filepath"
 	"strings"
 	"sync"
+	"time"
 
 	"github.com/ip2location/ip2location-go/v9"
 	log "github.com/sirupsen/logrus"
@@ -43,11 +43,11 @@ type Info struct {
 }
 
 const (
-	DBFilename        = "IP2LOCATION-LITE-DB9.BIN"
-	DBFileLocation    = "/tmp/location_db.bin"
-	DBZIPFileLocation = "/tmp/location_db.bin" + ".zip"
+	dbFilename        = "IP2LOCATION-LITE-DB9.BIN"
+	dbFileLocation    = "/tmp/location_db.bin"
+	dbZIPFileLocation = "/tmp/location_db.bin" + ".zip"
 	// REF: Original location from ip2location DB is: "https://www.ip2location.com/download/?token=OpOljbgT6K2WJnFrFBBmBzRVNpHlcYqNN4CMeGavvh0pPOpyu16gKQyqvDMxTDF4&file=DB9LITEBIN"
-	DbUrl = "https://raw.githubusercontent.com/netobserv/flowlogs-pipeline/main/contrib/location/location.db"
+	dbUrl = "https://raw.githubusercontent.com/netobserv/flowlogs-pipeline/main/contrib/location/location.db"
 )
 
 var locationDB *ip2location.DB
@@ -70,7 +70,7 @@ func init() {
 	_osio.MkdirAll = os.MkdirAll
 	_osio.OpenFile = os.OpenFile
 	_osio.Copy = io.Copy
-	_dbURL = DbUrl
+	_dbURL = dbUrl
 	locationDBMutex = &sync.Mutex{}
 }
 
@@ -78,17 +78,16 @@ func InitLocationDB() error {
 	locationDBMutex.Lock()
 	defer locationDBMutex.Unlock()
 
-	if _, statErr := _osio.Stat(DBFileLocation); errors.Is(statErr, os.ErrNotExist) {
-		log.Infof("Downloading location DB into local file %s ", DBFileLocation)
-		out, createErr := _osio.Create(DBZIPFileLocation)
+	if _, statErr := _osio.Stat(dbFileLocation); errors.Is(statErr, os.ErrNotExist) {
+		log.Infof("Downloading location DB into local file %s ", dbFileLocation)
+		out, createErr := _osio.Create(dbZIPFileLocation)
 		if createErr != nil {
 			return fmt.Errorf("failed os.Create %v ", createErr)
 		}
 
-		tr := &http.Transport{
-			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
-		}
-		client := &http.Client{Transport: tr}
+		timeout := time.Minute
+		tr := &http.Transport{IdleConnTimeout: timeout}
+		client := &http.Client{Transport: tr, Timeout: timeout}
 		resp, getErr := client.Get(_dbURL)
 		if getErr != nil {
 			return fmt.Errorf("failed http.Get %v ", getErr)
@@ -101,7 +100,7 @@ func InitLocationDB() error {
 			return fmt.Errorf("failed io.Copy %v ", copyErr)
 		}
 
-		log.Infof("Wrote %d bytes to %s", written, DBZIPFileLocation)
+		log.Infof("Wrote %d bytes to %s", written, dbZIPFileLocation)
 
 		bodyCloseErr := resp.Body.Close()
 		if bodyCloseErr != nil {
@@ -113,13 +112,13 @@ func InitLocationDB() error {
 			return fmt.Errorf("failed out.Close %v ", outCloseErr)
 		}
 
-		unzipErr := unzip(DBZIPFileLocation, DBFileLocation)
+		unzipErr := unzip(dbZIPFileLocation, dbFileLocation)
 		if unzipErr != nil {
-			file, openErr := os.Open(DBFileLocation + "/" + DBFilename)
+			file, openErr := os.Open(dbFileLocation + "/" + dbFilename)
 			if openErr == nil {
 				fi, fileStatErr := file.Stat()
 				if fileStatErr == nil {
-					log.Infof("length of %s is: %d", DBFileLocation+"/"+DBFilename, fi.Size())
+					log.Infof("length of %s is: %d", dbFileLocation+"/"+dbFilename, fi.Size())
 					_ = file.Close()
 				} else {
 					log.Infof("file.Stat err %v", fileStatErr)
@@ -128,9 +127,9 @@ func InitLocationDB() error {
 				log.Infof("os.Open err %v", openErr)
 			}
 
-			fileContent, readFileErr := os.ReadFile(DBFileLocation + "/" + DBFilename)
+			fileContent, readFileErr := os.ReadFile(dbFileLocation + "/" + dbFilename)
 			if readFileErr == nil {
-				log.Infof("content of first 100 bytes of %s  is: %s", DBFileLocation+"/"+DBFilename, fileContent[:100])
+				log.Infof("content of first 100 bytes of %s is: %s", dbFileLocation+"/"+dbFilename, fileContent[:100])
 			} else {
 				log.Infof("os.ReadFile err %v", readFileErr)
 			}
@@ -142,7 +141,7 @@ func InitLocationDB() error {
 	}
 
 	log.Debugf("Loading location DB")
-	db, openDBErr := ip2location.OpenDB(DBFileLocation + "/" + DBFilename)
+	db, openDBErr := ip2location.OpenDB(dbFileLocation + "/" + dbFilename)
 	if openDBErr != nil {
 		return fmt.Errorf("OpenDB err - %v ", openDBErr)
 	}
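The hunk above replaces the InsecureSkipVerify transport with a client that keeps TLS verification on and is bounded by an overall timeout. A minimal sketch of that pattern, with an illustrative URL:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	timeout := time.Minute
	client := &http.Client{
		Transport: &http.Transport{IdleConnTimeout: timeout},
		Timeout:   timeout, // caps the whole request, including reading the body
	}
	resp, err := client.Get("https://example.com/location.db")
	if err != nil {
		fmt.Println("download failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```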
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network.go
index 35f4d2873567040a3972371ce6ee221e4fa6a4c5..f6c4ea8ee2a6edaecbe166b39fe9e86f15b6edfa 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network.go
@@ -98,31 +98,9 @@ func (n *Network) Transform(inputEntry config.GenericMap) (config.GenericMap, bo
 			}
 			outputEntry[rule.Output] = serviceName
 		case api.OpAddKubernetes:
-			kubeInfo, err := kubernetes.Data.GetInfo(fmt.Sprintf("%s", outputEntry[rule.Input]))
-			if err != nil {
-				logrus.WithError(err).Tracef("can't find kubernetes info for IP %v", outputEntry[rule.Input])
-				continue
-			}
-			// NETOBSERV-666: avoid putting empty namespaces or Loki aggregation queries will
-			// differentiate between empty and nil namespaces.
-			if kubeInfo.Namespace != "" {
-				outputEntry[rule.Output+"_Namespace"] = kubeInfo.Namespace
-			}
-			outputEntry[rule.Output+"_Name"] = kubeInfo.Name
-			outputEntry[rule.Output+"_Type"] = kubeInfo.Type
-			outputEntry[rule.Output+"_OwnerName"] = kubeInfo.Owner.Name
-			outputEntry[rule.Output+"_OwnerType"] = kubeInfo.Owner.Type
-			if rule.Parameters != "" {
-				for labelKey, labelValue := range kubeInfo.Labels {
-					outputEntry[rule.Parameters+"_"+labelKey] = labelValue
-				}
-			}
-			if kubeInfo.HostIP != "" {
-				outputEntry[rule.Output+"_HostIP"] = kubeInfo.HostIP
-				if kubeInfo.HostName != "" {
-					outputEntry[rule.Output+"_HostName"] = kubeInfo.HostName
-				}
-			}
+			kubernetes.Enrich(outputEntry, rule)
+		case api.OpAddKubernetesInfra:
+			kubernetes.EnrichLayer(outputEntry, rule)
 		case api.OpReinterpretDirection:
 			reinterpretDirection(outputEntry, &n.DirectionInfo)
 		case api.OpAddIPCategory:
@@ -172,6 +150,8 @@ func NewTransformNetwork(params config.StageParam) (Transformer, error) {
 			needToInitLocationDB = true
 		case api.OpAddKubernetes:
 			needToInitKubeData = true
+		case api.OpAddKubernetesInfra:
+			needToInitKubeData = true
 		case api.OpAddService:
 			needToInitNetworkServices = true
 		case api.OpReinterpretDirection:
@@ -193,7 +173,7 @@ func NewTransformNetwork(params config.StageParam) (Transformer, error) {
 	}
 
 	if needToInitKubeData {
-		err := kubernetes.Data.InitFromConfig(jsonNetworkTransform.KubeConfigPath)
+		err := kubernetes.InitFromConfig(jsonNetworkTransform.KubeConfigPath)
 		if err != nil {
 			return nil, err
 		}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/prometheus/prom_server.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/prometheus/prom_server.go
index d40000bc032da83895cbc89ea00868739803b6fa..19a22658323c08449f2f37e1ba0d2233fb5833d5 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/prometheus/prom_server.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/prometheus/prom_server.go
@@ -24,6 +24,7 @@ import (
 
 	"github.com/netobserv/flowlogs-pipeline/pkg/api"
 	"github.com/netobserv/flowlogs-pipeline/pkg/config"
+	"github.com/netobserv/flowlogs-pipeline/pkg/server"
 	prom "github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"github.com/sirupsen/logrus"
@@ -63,7 +64,7 @@ func StartServerAsync(conn *api.PromConnectionInfo, registry *prom.Registry) *ht
 	addr := fmt.Sprintf("%s:%v", conn.Address, port)
 	plog.Infof("StartServerAsync: addr = %s", addr)
 
-	httpServer := http.Server{
+	httpServer := &http.Server{
 		Addr: addr,
 		// TLS clients must use TLS 1.2 or higher
 		TLSConfig: &tls.Config{
@@ -79,6 +80,7 @@ func StartServerAsync(conn *api.PromConnectionInfo, registry *prom.Registry) *ht
 		mux.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
 	}
 	httpServer.Handler = mux
+	httpServer = server.Default(httpServer)
 
 	go func() {
 		var err error
@@ -92,5 +94,5 @@ func StartServerAsync(conn *api.PromConnectionInfo, registry *prom.Registry) *ht
 		}
 	}()
 
-	return &httpServer
+	return httpServer
 }
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/server/common.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/server/common.go
new file mode 100644
index 0000000000000000000000000000000000000000..92fdb7fb56c182896115aa37d6a6495c0a40a441
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/server/common.go
@@ -0,0 +1,46 @@
+package server
+
+import (
+	"crypto/tls"
+	"net/http"
+	"time"
+
+	"github.com/sirupsen/logrus"
+)
+
+var slog = logrus.WithField("module", "server")
+
+func Default(srv *http.Server) *http.Server {
+	// Defaults taken from https://bruinsslot.jp/post/go-secure-webserver/; they can be overridden by the caller.
+	if srv.Handler != nil {
+		// No more than 2MB body
+		srv.Handler = http.MaxBytesHandler(srv.Handler, 2<<20)
+	} else {
+		slog.Warnf("Handler not yet set on server while securing defaults. Make sure a MaxByte middleware is used.")
+	}
+	if srv.ReadTimeout == 0 {
+		srv.ReadTimeout = 10 * time.Second
+	}
+	if srv.ReadHeaderTimeout == 0 {
+		srv.ReadHeaderTimeout = 5 * time.Second
+	}
+	if srv.WriteTimeout == 0 {
+		srv.WriteTimeout = 10 * time.Second
+	}
+	if srv.IdleTimeout == 0 {
+		srv.IdleTimeout = 120 * time.Second
+	}
+	if srv.MaxHeaderBytes == 0 {
+		srv.MaxHeaderBytes = 1 << 20 // 1MB
+	}
+	if srv.TLSConfig == nil {
+		srv.TLSConfig = &tls.Config{}
+	}
+	if srv.TLSConfig.MinVersion == 0 {
+		srv.TLSConfig.MinVersion = tls.VersionTLS13
+	}
+	// Disable HTTP/2
+	srv.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler))
+
+	return srv
+}
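A hypothetical usage sketch for the new server.Default helper, assuming the vendored import path above. Setting the handler before calling Default matters, since that is when the http.MaxBytesHandler wrapper is applied; zero-valued timeouts and TLS settings are then filled with the secure defaults.

```go
package main

import (
	"net/http"

	"github.com/netobserv/flowlogs-pipeline/pkg/server"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	})
	// Handler is set first so Default can wrap it with http.MaxBytesHandler.
	srv := server.Default(&http.Server{Addr: ":8080", Handler: mux})
	_ = srv.ListenAndServe()
}
```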
diff --git a/vendor/github.com/netsampler/goflow2/decoders/netflow/netflow.go b/vendor/github.com/netsampler/goflow2/decoders/netflow/netflow.go
index e01529f1db8abc8eb773c039fd8c927cc09e6202..8191b632fbf594685c869b846d01f96a010a7568 100644
--- a/vendor/github.com/netsampler/goflow2/decoders/netflow/netflow.go
+++ b/vendor/github.com/netsampler/goflow2/decoders/netflow/netflow.go
@@ -374,8 +374,10 @@ func DecodeMessageContext(ctx context.Context, payload *bytes.Buffer, templateKe
 	} else {
 		return nil, fmt.Errorf("NetFlow/IPFIX version error: %d", version)
 	}
+	read := 16
+	startSize := payload.Len()
 
-	for i := 0; ((i < int(size) && version == 9) || version == 10) && payload.Len() > 0; i++ {
+	for i := 0; ((i < int(size) && version == 9) || (uint16(read) < size && version == 10)) && payload.Len() > 0; i++ {
 		fsheader := FlowSetHeader{}
 		if err := utils.BinaryDecoder(payload, &fsheader.Id, &fsheader.Length); err != nil {
 			return returnItem, fmt.Errorf("Error decoding FlowSet header: %v", err)
@@ -522,6 +524,7 @@ func DecodeMessageContext(ctx context.Context, payload *bytes.Buffer, templateKe
 		} else if version == 10 && flowSet != nil {
 			packetIPFIX.FlowSets = append(packetIPFIX.FlowSets, flowSet)
 		}
+		read = startSize - payload.Len() + 16
 	}
 
 	if version == 9 {
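The loop-accounting change above reflects a difference between the two header formats: in NetFlow v9 the header's count field is a number of records, while in IPFIX (v10) it is the total message length in bytes, so the decoder must stop once it has consumed that many bytes rather than after a fixed number of iterations. An illustrative sketch of the byte accounting, with made-up flow set lengths:

```go
package main

import "fmt"

func main() {
	const headerLen = 16         // IPFIX message header size in bytes
	size := uint16(64)           // header "length" field: whole message, in bytes
	flowSetLens := []int{24, 24} // flow set lengths found in the body
	read := headerLen
	for _, l := range flowSetLens {
		if uint16(read) >= size {
			break // would run past the advertised message length
		}
		read += l
	}
	fmt.Println("bytes consumed:", read) // 64, matching the header length
}
```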
diff --git a/vendor/github.com/netsampler/goflow2/format/common/text.go b/vendor/github.com/netsampler/goflow2/format/common/text.go
index d97ed68294e868756926f78cb24323c4d571df33..6e009c583e407058fb4ecce1e6295d4650833651 100644
--- a/vendor/github.com/netsampler/goflow2/format/common/text.go
+++ b/vendor/github.com/netsampler/goflow2/format/common/text.go
@@ -186,7 +186,10 @@ func FormatMessageReflectCustom(msg interface{}, ext, quotes, sep, sign string,
 		}
 		fieldValue := vfm.FieldByName(fieldName)
 		// todo: replace s by json mapping of protobuf
-		if fieldValue.IsValid() {
+		if renderer, ok := RenderExtras[fieldName]; ok {
+			fstr[i] = fmt.Sprintf("%s%s%s%s%q", quotes, s, quotes, sign, renderer(msg))
+			i++
+		} else if fieldValue.IsValid() {
 
 			if fieldType, ok := TextFields[fieldName]; ok {
 				switch fieldType {
@@ -213,8 +216,6 @@ func FormatMessageReflectCustom(msg interface{}, ext, quotes, sep, sign string,
 
 					}
 				}
-			} else if renderer, ok := RenderExtras[fieldName]; ok {
-				fstr[i] = fmt.Sprintf("%s%s%s%s%q", quotes, s, quotes, sign, renderer(msg))
 			} else {
 				// handle specific types here
 				switch fieldValue.Kind() {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index 1feba62c6c93053a083008becc2330b0f6744694..b5c8bcb395aa5d8bad3e077b50550267bfe257d2 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -475,6 +475,9 @@ type HistogramOpts struct {
 
 	// now is for testing purposes, by default it's time.Now.
 	now func() time.Time
+
+	// afterFunc is for testing purposes, by default it's time.AfterFunc.
+	afterFunc func(time.Duration, func()) *time.Timer
 }
 
 // HistogramVecOpts bundles the options to create a HistogramVec metric.
@@ -526,7 +529,9 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 	if opts.now == nil {
 		opts.now = time.Now
 	}
-
+	if opts.afterFunc == nil {
+		opts.afterFunc = time.AfterFunc
+	}
 	h := &histogram{
 		desc:                            desc,
 		upperBounds:                     opts.Buckets,
@@ -536,6 +541,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 		nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
 		lastResetTime:                   opts.now(),
 		now:                             opts.now,
+		afterFunc:                       opts.afterFunc,
 	}
 	if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
 		h.upperBounds = DefBuckets
@@ -716,9 +722,16 @@ type histogram struct {
 	nativeHistogramMinResetDuration time.Duration
 	// lastResetTime is protected by mtx. It is also used as created timestamp.
 	lastResetTime time.Time
+	// resetScheduled is protected by mtx. It is true if a reset is
+	// scheduled for a later time (when nativeHistogramMinResetDuration has
+	// passed).
+	resetScheduled bool
 
 	// now is for testing purposes, by default it's time.Now.
 	now func() time.Time
+
+	// afterFunc is for testing purposes, by default it's time.AfterFunc.
+	afterFunc func(time.Duration, func()) *time.Timer
 }
 
 func (h *histogram) Desc() *Desc {
@@ -874,21 +887,31 @@ func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket
 	if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) {
 		return
 	}
+	// One of the other mitigation strategies will run next. To undo its
+	// effect once enough time has passed to satisfy
+	// h.nativeHistogramMinResetDuration, schedule a reset at that point in
+	// time, unless one is already scheduled.
+	if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled {
+		h.resetScheduled = true
+		h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset)
+	}
+
 	if h.maybeWidenZeroBucket(hotCounts, coldCounts) {
 		return
 	}
 	h.doubleBucketWidth(hotCounts, coldCounts)
 }
 
-// maybeReset resets the whole histogram if at least h.nativeHistogramMinResetDuration
-// has been passed. It returns true if the histogram has been reset. The caller
-// must have locked h.mtx.
+// maybeReset resets the whole histogram if at least
+// h.nativeHistogramMinResetDuration has been passed. It returns true if the
+// histogram has been reset. The caller must have locked h.mtx.
 func (h *histogram) maybeReset(
 	hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int,
 ) bool {
 	// We are using the possibly mocked h.now() rather than
 	// time.Since(h.lastResetTime) to enable testing.
-	if h.nativeHistogramMinResetDuration == 0 ||
+	if h.nativeHistogramMinResetDuration == 0 || // No reset configured.
+		h.resetScheduled || // Do not interfere if a reset is already scheduled.
 		h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
 		return false
 	}
@@ -906,6 +929,29 @@ func (h *histogram) maybeReset(
 	return true
 }
 
+// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be
+// called without having locked h.mtx.
+func (h *histogram) reset() {
+	h.mtx.Lock()
+	defer h.mtx.Unlock()
+
+	n := atomic.LoadUint64(&h.countAndHotIdx)
+	hotIdx := n >> 63
+	coldIdx := (^n) >> 63
+	hot := h.counts[hotIdx]
+	cold := h.counts[coldIdx]
+	// Completely reset coldCounts.
+	h.resetCounts(cold)
+	// Make coldCounts the new hot counts while resetting countAndHotIdx.
+	n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63)
+	count := n & ((1 << 63) - 1)
+	waitForCooldown(count, hot)
+	// Finally, reset the formerly hot counts, too.
+	h.resetCounts(hot)
+	h.lastResetTime = h.now()
+	h.resetScheduled = false
+}
+
 // maybeWidenZeroBucket widens the zero bucket until it includes the existing
 // buckets closest to the zero bucket (which could be two, if an equidistant
 // negative and a positive bucket exists, but usually it's only one bucket to be
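A usage sketch for the scheduled-reset behavior patched above: with NativeHistogramMinResetDuration set, a bucket-limited native histogram now schedules a full reset once that duration has elapsed, instead of only widening the zero bucket or doubling bucket widths. The metric name and values are illustrative; this assumes a client_golang version with native histogram support.

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:                            "request_duration_seconds",
		NativeHistogramBucketFactor:     1.1,
		NativeHistogramMaxBucketNumber:  100,
		NativeHistogramMinResetDuration: time.Hour, // enables the scheduled reset
	})
	h.Observe(0.42)
}
```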
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
index b3c4eca2bc1c80704bf783b3352bb6e0db68023d..c21911f292d4ef8604cfc58f6f8a21cb2a740fda 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -165,6 +165,8 @@ func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
 
 func validateLabelValues(vals []string, expectedNumberOfValues int) error {
 	if len(vals) != expectedNumberOfValues {
+		// The call below makes vals escape; copy them to avoid that.
+		vals := append([]string(nil), vals...)
 		return fmt.Errorf(
 			"%w: expected %d label values but got %d in %#v",
 			errInconsistentCardinality, expectedNumberOfValues,
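The copy above is an escape-analysis optimization: fmt.Errorf would otherwise force the vals parameter to escape to the heap even on the common, error-free path. An illustrative standalone demo of the same trick (inspect with go build -gcflags=-m):

```go
package main

import "fmt"

func check(vals []string, want int) error {
	if len(vals) != want {
		// Shadow the parameter with a copy; only the copy escapes.
		vals := append([]string(nil), vals...)
		return fmt.Errorf("expected %d values, got %d in %#v", want, len(vals), vals)
	}
	return nil
}

func main() {
	fmt.Println(check([]string{"a"}, 2))
}
```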
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
index c0152cdb613af402df766c377dd5ffc4406588a8..8c1136ceea35577e4c1d46c9ffa336f7fbb24749 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
@@ -11,8 +11,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//go:build !windows && !js
-// +build !windows,!js
+//go:build !windows && !js && !wasip1
+// +build !windows,!js,!wasip1
 
 package prometheus
 
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go
new file mode 100644
index 0000000000000000000000000000000000000000..d8d9a6d7a2fa3ec8ec095a8017376ec45fcec80d
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go
@@ -0,0 +1,26 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build wasip1
+// +build wasip1
+
+package prometheus
+
+func canCollectProcess() bool {
+	return false
+}
+
+func (*processCollector) processCollect(chan<- Metric) {
+	// noop on this platform
+	return
+}
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
index 84946b27035412ef78f64d7b8a82f0e732085297..cee360db7f3921e62df16e48eb580db88f7ad663 100644
--- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -474,6 +474,9 @@ type Histogram struct {
 	NegativeDelta []int64   `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
 	NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"`  // Absolute count of each bucket.
 	// Positive buckets for the native histogram.
+	// Use a no-op span (offset 0, length 0) for a native histogram without any
+	// observations yet and with a zero_threshold of 0. Otherwise, it would be
+	// indistinguishable from a classic histogram.
 	PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"`
 	// Use either "positive_delta" or "positive_count", the former for
 	// regular histograms with integer counts, the latter for float
diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go
index 37aa966748b8cab3be6939273379f81953aef9bb..4a926e8d067eedcbdba95afc3620b063de5db0c6 100644
--- a/vendor/github.com/prometheus/common/config/http_config.go
+++ b/vendor/github.com/prometheus/common/config/http_config.go
@@ -129,6 +129,7 @@ func (tv *TLSVersion) String() string {
 // BasicAuth contains basic HTTP authentication credentials.
 type BasicAuth struct {
 	Username     string `yaml:"username" json:"username"`
+	UsernameFile string `yaml:"username_file,omitempty" json:"username_file,omitempty"`
 	Password     Secret `yaml:"password,omitempty" json:"password,omitempty"`
 	PasswordFile string `yaml:"password_file,omitempty" json:"password_file,omitempty"`
 }
@@ -139,6 +140,7 @@ func (a *BasicAuth) SetDirectory(dir string) {
 		return
 	}
 	a.PasswordFile = JoinDir(dir, a.PasswordFile)
+	a.UsernameFile = JoinDir(dir, a.UsernameFile)
 }
 
 // Authorization contains HTTP authorization credentials.
@@ -334,6 +336,9 @@ func (c *HTTPClientConfig) Validate() error {
 	if (c.BasicAuth != nil || c.OAuth2 != nil) && (len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0) {
 		return fmt.Errorf("at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured")
 	}
+	if c.BasicAuth != nil && (string(c.BasicAuth.Username) != "" && c.BasicAuth.UsernameFile != "") {
+		return fmt.Errorf("at most one of basic_auth username & username_file must be configured")
+	}
 	if c.BasicAuth != nil && (string(c.BasicAuth.Password) != "" && c.BasicAuth.PasswordFile != "") {
 		return fmt.Errorf("at most one of basic_auth password & password_file must be configured")
 	}
@@ -541,10 +546,10 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT
 
 		// If a authorization_credentials is provided, create a round tripper that will set the
 		// Authorization header correctly on each request.
-		if cfg.Authorization != nil && len(cfg.Authorization.Credentials) > 0 {
-			rt = NewAuthorizationCredentialsRoundTripper(cfg.Authorization.Type, cfg.Authorization.Credentials, rt)
-		} else if cfg.Authorization != nil && len(cfg.Authorization.CredentialsFile) > 0 {
+		if cfg.Authorization != nil && len(cfg.Authorization.CredentialsFile) > 0 {
 			rt = NewAuthorizationCredentialsFileRoundTripper(cfg.Authorization.Type, cfg.Authorization.CredentialsFile, rt)
+		} else if cfg.Authorization != nil {
+			rt = NewAuthorizationCredentialsRoundTripper(cfg.Authorization.Type, cfg.Authorization.Credentials, rt)
 		}
 		// Backwards compatibility, be nice with importers who would not have
 		// called Validate().
@@ -555,7 +560,7 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT
 		}
 
 		if cfg.BasicAuth != nil {
-			rt = NewBasicAuthRoundTripper(cfg.BasicAuth.Username, cfg.BasicAuth.Password, cfg.BasicAuth.PasswordFile, rt)
+			rt = NewBasicAuthRoundTripper(cfg.BasicAuth.Username, cfg.BasicAuth.Password, cfg.BasicAuth.UsernameFile, cfg.BasicAuth.PasswordFile, rt)
 		}
 
 		if cfg.OAuth2 != nil {
@@ -625,7 +630,7 @@ func (rt *authorizationCredentialsFileRoundTripper) RoundTrip(req *http.Request)
 	if len(req.Header.Get("Authorization")) == 0 {
 		b, err := os.ReadFile(rt.authCredentialsFile)
 		if err != nil {
-			return nil, fmt.Errorf("unable to read authorization credentials file %s: %s", rt.authCredentialsFile, err)
+			return nil, fmt.Errorf("unable to read authorization credentials file %s: %w", rt.authCredentialsFile, err)
 		}
 		authCredentials := strings.TrimSpace(string(b))
 
@@ -645,30 +650,43 @@ func (rt *authorizationCredentialsFileRoundTripper) CloseIdleConnections() {
 type basicAuthRoundTripper struct {
 	username     string
 	password     Secret
+	usernameFile string
 	passwordFile string
 	rt           http.RoundTripper
 }
 
 // NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a request unless it has
 // already been set.
-func NewBasicAuthRoundTripper(username string, password Secret, passwordFile string, rt http.RoundTripper) http.RoundTripper {
-	return &basicAuthRoundTripper{username, password, passwordFile, rt}
+func NewBasicAuthRoundTripper(username string, password Secret, usernameFile, passwordFile string, rt http.RoundTripper) http.RoundTripper {
+	return &basicAuthRoundTripper{username, password, usernameFile, passwordFile, rt}
 }
 
 func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	var username string
+	var password string
 	if len(req.Header.Get("Authorization")) != 0 {
 		return rt.rt.RoundTrip(req)
 	}
-	req = cloneRequest(req)
+	if rt.usernameFile != "" {
+		usernameBytes, err := os.ReadFile(rt.usernameFile)
+		if err != nil {
+			return nil, fmt.Errorf("unable to read basic auth username file %s: %w", rt.usernameFile, err)
+		}
+		username = strings.TrimSpace(string(usernameBytes))
+	} else {
+		username = rt.username
+	}
 	if rt.passwordFile != "" {
-		bs, err := os.ReadFile(rt.passwordFile)
+		passwordBytes, err := os.ReadFile(rt.passwordFile)
 		if err != nil {
-			return nil, fmt.Errorf("unable to read basic auth password file %s: %s", rt.passwordFile, err)
+			return nil, fmt.Errorf("unable to read basic auth password file %s: %w", rt.passwordFile, err)
 		}
-		req.SetBasicAuth(rt.username, strings.TrimSpace(string(bs)))
+		password = strings.TrimSpace(string(passwordBytes))
 	} else {
-		req.SetBasicAuth(rt.username, strings.TrimSpace(string(rt.password)))
+		password = string(rt.password)
 	}
+	req = cloneRequest(req)
+	req.SetBasicAuth(username, password)
 	return rt.rt.RoundTrip(req)
 }
 
@@ -705,7 +723,7 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro
 	if rt.config.ClientSecretFile != "" {
 		data, err := os.ReadFile(rt.config.ClientSecretFile)
 		if err != nil {
-			return nil, fmt.Errorf("unable to read oauth2 client secret file %s: %s", rt.config.ClientSecretFile, err)
+			return nil, fmt.Errorf("unable to read oauth2 client secret file %s: %w", rt.config.ClientSecretFile, err)
 		}
 		secret = strings.TrimSpace(string(data))
 		rt.mtx.RLock()
@@ -959,7 +977,7 @@ func (c *TLSConfig) getClientCertificate(_ *tls.CertificateRequestInfo) (*tls.Ce
 	if c.CertFile != "" {
 		certData, err = os.ReadFile(c.CertFile)
 		if err != nil {
-			return nil, fmt.Errorf("unable to read specified client cert (%s): %s", c.CertFile, err)
+			return nil, fmt.Errorf("unable to read specified client cert (%s): %w", c.CertFile, err)
 		}
 	} else {
 		certData = []byte(c.Cert)
@@ -968,7 +986,7 @@ func (c *TLSConfig) getClientCertificate(_ *tls.CertificateRequestInfo) (*tls.Ce
 	if c.KeyFile != "" {
 		keyData, err = os.ReadFile(c.KeyFile)
 		if err != nil {
-			return nil, fmt.Errorf("unable to read specified client key (%s): %s", c.KeyFile, err)
+			return nil, fmt.Errorf("unable to read specified client key (%s): %w", c.KeyFile, err)
 		}
 	} else {
 		keyData = []byte(c.Key)
@@ -976,7 +994,7 @@ func (c *TLSConfig) getClientCertificate(_ *tls.CertificateRequestInfo) (*tls.Ce
 
 	cert, err := tls.X509KeyPair(certData, keyData)
 	if err != nil {
-		return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %s", c.CertFile, c.KeyFile, err)
+		return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %w", c.CertFile, c.KeyFile, err)
 	}
 
 	return &cert, nil
@@ -986,7 +1004,7 @@ func (c *TLSConfig) getClientCertificate(_ *tls.CertificateRequestInfo) (*tls.Ce
 func readCAFile(f string) ([]byte, error) {
 	data, err := os.ReadFile(f)
 	if err != nil {
-		return nil, fmt.Errorf("unable to load specified CA cert %s: %s", f, err)
+		return nil, fmt.Errorf("unable to load specified CA cert %s: %w", f, err)
 	}
 	return data, nil
 }
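A hypothetical sketch of the new username_file support, using the NewBasicAuthRoundTripper signature introduced in this diff (later versions of prometheus/common may change it again). File paths are illustrative; per Validate, at most one of username/username_file and password/password_file may be set.

```go
package main

import (
	"net/http"

	"github.com/prometheus/common/config"
)

func main() {
	// Username and password are read from files at request time.
	rt := config.NewBasicAuthRoundTripper(
		"", config.Secret(""), // inline values left empty
		"/etc/secrets/username", "/etc/secrets/password",
		http.DefaultTransport,
	)
	client := &http.Client{Transport: rt}
	_, _ = client.Get("https://example.com/api")
}
```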
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
index 906397815138b3c6a0e2e792139e7b7b3b3fc41d..a909b171c866aa3e1ea025eb720f9bb539fae643 100644
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -14,6 +14,7 @@
 package expfmt
 
 import (
+	"bufio"
 	"fmt"
 	"io"
 	"math"
@@ -21,8 +22,8 @@ import (
 	"net/http"
 
 	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/encoding/protodelim"
 
-	"github.com/matttproud/golang_protobuf_extensions/pbutil"
 	"github.com/prometheus/common/model"
 )
 
@@ -86,8 +87,10 @@ type protoDecoder struct {
 
 // Decode implements the Decoder interface.
 func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
-	_, err := pbutil.ReadDelimited(d.r, v)
-	if err != nil {
+	opts := protodelim.UnmarshalOptions{
+		MaxSize: -1,
+	}
+	if err := opts.UnmarshalFrom(bufio.NewReader(d.r), v); err != nil {
 		return err
 	}
 	if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
index 7f611ffaad7c74e8c2bc79fb240e913ca842013d..02b7a5e81284408d3112ca8099405d5af8cb51a7 100644
--- a/vendor/github.com/prometheus/common/expfmt/encode.go
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -18,10 +18,11 @@ import (
 	"io"
 	"net/http"
 
-	"github.com/matttproud/golang_protobuf_extensions/pbutil"
-	"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
+	"google.golang.org/protobuf/encoding/protodelim"
 	"google.golang.org/protobuf/encoding/prototext"
 
+	"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
+
 	dto "github.com/prometheus/client_model/go"
 )
 
@@ -120,7 +121,7 @@ func NewEncoder(w io.Writer, format Format) Encoder {
 	case FmtProtoDelim:
 		return encoderCloser{
 			encode: func(v *dto.MetricFamily) error {
-				_, err := pbutil.WriteDelimited(w, v)
+				_, err := protodelim.MarshalTo(w, v)
 				return err
 			},
 			close: func() error { return nil },
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
index 35db1cc9d73c77ff122d425fe34bab4ef44ec3da..26490211af2a56c48b46796dadc800f028c2e6c6 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -16,6 +16,7 @@ package expfmt
 import (
 	"bufio"
 	"bytes"
+	"errors"
 	"fmt"
 	"io"
 	"math"
@@ -24,8 +25,9 @@ import (
 
 	dto "github.com/prometheus/client_model/go"
 
-	"github.com/prometheus/common/model"
 	"google.golang.org/protobuf/proto"
+
+	"github.com/prometheus/common/model"
 )
 
 // A stateFn is a function that represents a state in a state machine. By
@@ -112,7 +114,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF
 	// stream. Turn this error into something nicer and more
 	// meaningful. (io.EOF is often used as a signal for the legitimate end
 	// of an input stream.)
-	if p.err == io.EOF {
+	if p.err != nil && errors.Is(p.err, io.EOF) {
 		p.parseError("unexpected end of input stream")
 	}
 	return p.metricFamiliesByName, p.err
@@ -146,7 +148,7 @@ func (p *TextParser) startOfLine() stateFn {
 		// which is not an error but the signal that we are done.
 		// Any other error that happens to align with the start of
 		// a line is still an error.
-		if p.err == io.EOF {
+		if errors.Is(p.err, io.EOF) {
 			p.err = nil
 		}
 		return nil
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
index 35e739c7ad21533593a4e1338ac5621aa5c3cec7..178fdbaf615e382c3bd7aa36b0d5bcd99ad7d8ca 100644
--- a/vendor/github.com/prometheus/common/model/alert.go
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -90,13 +90,13 @@ func (a *Alert) Validate() error {
 		return fmt.Errorf("start time must be before end time")
 	}
 	if err := a.Labels.Validate(); err != nil {
-		return fmt.Errorf("invalid label set: %s", err)
+		return fmt.Errorf("invalid label set: %w", err)
 	}
 	if len(a.Labels) == 0 {
 		return fmt.Errorf("at least one label pair required")
 	}
 	if err := a.Annotations.Validate(); err != nil {
-		return fmt.Errorf("invalid annotations: %s", err)
+		return fmt.Errorf("invalid annotations: %w", err)
 	}
 	return nil
 }
diff --git a/vendor/github.com/prometheus/common/model/metadata.go b/vendor/github.com/prometheus/common/model/metadata.go
new file mode 100644
index 0000000000000000000000000000000000000000..447ab8ad635b8b74cd550740508f0f7d2cd1125e
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/metadata.go
@@ -0,0 +1,28 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// MetricType represents metric type values.
+type MetricType string
+
+const (
+	MetricTypeCounter        = MetricType("counter")
+	MetricTypeGauge          = MetricType("gauge")
+	MetricTypeHistogram      = MetricType("histogram")
+	MetricTypeGaugeHistogram = MetricType("gaugehistogram")
+	MetricTypeSummary        = MetricType("summary")
+	MetricTypeInfo           = MetricType("info")
+	MetricTypeStateset       = MetricType("stateset")
+	MetricTypeUnknown        = MetricType("unknown")
+)
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
index 00804b7fedb029ac08e906c0e2bc2287537b4c76..f8c5eabaa9974ad7bc19e349f5007bbd962686b1 100644
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -20,12 +20,10 @@ import (
 	"strings"
 )
 
-var (
-	// MetricNameRE is a regular expression matching valid metric
-	// names. Note that the IsValidMetricName function performs the same
-	// check but faster than a match with this regular expression.
-	MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
-)
+// MetricNameRE is a regular expression matching valid metric
+// names. Note that the IsValidMetricName function performs the same
+// check but faster than a match with this regular expression.
+var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
 
 // A Metric is similar to a LabelSet, but the key difference is that a Metric is
 // a singleton and refers to one and only one stream of samples.
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
index 8762b13c63d1ac239db221d6437170ff77c11877..dc8a0026c4842717b44578899b5673586ae918c8 100644
--- a/vendor/github.com/prometheus/common/model/signature.go
+++ b/vendor/github.com/prometheus/common/model/signature.go
@@ -22,10 +22,8 @@ import (
 // when calculating their combined hash value (aka signature aka fingerprint).
 const SeparatorByte byte = 255
 
-var (
-	// cache the signature of an empty label set.
-	emptyLabelSignature = hashNew()
-)
+// cache the signature of an empty label set.
+var emptyLabelSignature = hashNew()
 
 // LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
 // given label set. (Collisions are possible but unlikely if the number of label
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
index bb99889d2cc6b247e608d443f1fe987b7f19794b..910b0b71fcce45c9fb9dff83801eb0161718665e 100644
--- a/vendor/github.com/prometheus/common/model/silence.go
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -81,7 +81,7 @@ func (s *Silence) Validate() error {
 	}
 	for _, m := range s.Matchers {
 		if err := m.Validate(); err != nil {
-			return fmt.Errorf("invalid matcher: %s", err)
+			return fmt.Errorf("invalid matcher: %w", err)
 		}
 	}
 	if s.StartsAt.IsZero() {
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
index 9eb440413fd342407969969a058930ac59059bcc..8050637d822309924728592c0f3a3c496124717e 100644
--- a/vendor/github.com/prometheus/common/model/value.go
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -21,14 +21,12 @@ import (
 	"strings"
 )
 
-var (
-	// ZeroSample is the pseudo zero-value of Sample used to signal a
-	// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
-	// and metric nil. Note that the natural zero value of Sample has a timestamp
-	// of 0, which is possible to appear in a real Sample and thus not suitable
-	// to signal a non-existing Sample.
-	ZeroSample = Sample{Timestamp: Earliest}
-)
+// ZeroSample is the pseudo zero-value of Sample used to signal a
+// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+// and metric nil. Note that the natural zero value of Sample has a timestamp
+// of 0, which is possible to appear in a real Sample and thus not suitable
+// to signal a non-existing Sample.
+var ZeroSample = Sample{Timestamp: Earliest}
 
 // Sample is a sample pair associated with a metric. A single sample must either
 // define Value or Histogram but not both. Histogram == nil implies the Value
@@ -274,7 +272,7 @@ func (s *Scalar) UnmarshalJSON(b []byte) error {
 
 	value, err := strconv.ParseFloat(f, 64)
 	if err != nil {
-		return fmt.Errorf("error parsing sample value: %s", err)
+		return fmt.Errorf("error parsing sample value: %w", err)
 	}
 	s.Value = SampleValue(value)
 	return nil
diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go
index 0f615a70530148f1a6a60480b70914822d8ec022..ae35cc2ab4b9957547b5ef186d89bd9d115e690e 100644
--- a/vendor/github.com/prometheus/common/model/value_float.go
+++ b/vendor/github.com/prometheus/common/model/value_float.go
@@ -20,14 +20,12 @@ import (
 	"strconv"
 )
 
-var (
-	// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
-	// non-existing sample pair. It is a SamplePair with timestamp Earliest and
-	// value 0.0. Note that the natural zero value of SamplePair has a timestamp
-	// of 0, which is possible to appear in a real SamplePair and thus not
-	// suitable to signal a non-existing SamplePair.
-	ZeroSamplePair = SamplePair{Timestamp: Earliest}
-)
+// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+// non-existing sample pair. It is a SamplePair with timestamp Earliest and
+// value 0.0. Note that the natural zero value of SamplePair has a timestamp
+// of 0, which is possible to appear in a real SamplePair and thus not
+// suitable to signal a non-existing SamplePair.
+var ZeroSamplePair = SamplePair{Timestamp: Earliest}
 
 // A SampleValue is a representation of a value for a given sample at a given
 // time.
diff --git a/vendor/github.com/prometheus/common/version/info.go b/vendor/github.com/prometheus/common/version/info.go
index 00caa0ba47af38d37100fbec3f7037cb9b7f5122..28884dbc391982ccf3c33205551c96755afdd77b 100644
--- a/vendor/github.com/prometheus/common/version/info.go
+++ b/vendor/github.com/prometheus/common/version/info.go
@@ -48,12 +48,12 @@ func NewCollector(program string) prometheus.Collector {
 			),
 			ConstLabels: prometheus.Labels{
 				"version":   Version,
-				"revision":  getRevision(),
+				"revision":  GetRevision(),
 				"branch":    Branch,
 				"goversion": GoVersion,
 				"goos":      GoOS,
 				"goarch":    GoArch,
-				"tags":      getTags(),
+				"tags":      GetTags(),
 			},
 		},
 		func() float64 { return 1 },
@@ -75,13 +75,13 @@ func Print(program string) string {
 	m := map[string]string{
 		"program":   program,
 		"version":   Version,
-		"revision":  getRevision(),
+		"revision":  GetRevision(),
 		"branch":    Branch,
 		"buildUser": BuildUser,
 		"buildDate": BuildDate,
 		"goVersion": GoVersion,
 		"platform":  GoOS + "/" + GoArch,
-		"tags":      getTags(),
+		"tags":      GetTags(),
 	}
 	t := template.Must(template.New("version").Parse(versionInfoTmpl))
 
@@ -94,10 +94,10 @@ func Print(program string) string {
 
 // Info returns version, branch and revision information.
 func Info() string {
-	return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, getRevision())
+	return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, GetRevision())
 }
 
 // BuildContext returns goVersion, platform, buildUser and buildDate information.
 func BuildContext() string {
-	return fmt.Sprintf("(go=%s, platform=%s, user=%s, date=%s, tags=%s)", GoVersion, GoOS+"/"+GoArch, BuildUser, BuildDate, getTags())
+	return fmt.Sprintf("(go=%s, platform=%s, user=%s, date=%s, tags=%s)", GoVersion, GoOS+"/"+GoArch, BuildUser, BuildDate, GetTags())
 }
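With getRevision and getTags exported, importers can read the build metadata directly; a minimal sketch, assuming the vendored prometheus/common version above:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/version"
)

func main() {
	fmt.Println("revision:", version.GetRevision())
	fmt.Println("tags:", version.GetTags())
}
```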
diff --git a/vendor/github.com/prometheus/common/version/info_default.go b/vendor/github.com/prometheus/common/version/info_default.go
index 8eb3a0bf17701d915e8abeb7453870a1e45ceb27..684996f10a74343e723f3ac7ccd9eba33097e4f2 100644
--- a/vendor/github.com/prometheus/common/version/info_default.go
+++ b/vendor/github.com/prometheus/common/version/info_default.go
@@ -16,7 +16,7 @@
 
 package version
 
-func getRevision() string {
+func GetRevision() string {
 	return Revision
 }
 
diff --git a/vendor/github.com/prometheus/common/version/info_go118.go b/vendor/github.com/prometheus/common/version/info_go118.go
index bfc7d41038e748f8efbddd39eb21a471f1c6aef9..992623c6cc92381911b9ee2e4fa2cf41a486f260 100644
--- a/vendor/github.com/prometheus/common/version/info_go118.go
+++ b/vendor/github.com/prometheus/common/version/info_go118.go
@@ -18,17 +18,19 @@ package version
 
 import "runtime/debug"
 
-var computedRevision string
-var computedTags string
+var (
+	computedRevision string
+	computedTags     string
+)
 
-func getRevision() string {
+func GetRevision() string {
 	if Revision != "" {
 		return Revision
 	}
 	return computedRevision
 }
 
-func getTags() string {
+func GetTags() string {
 	return computedTags
 }
 
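
On Go 1.18+ the `computedRevision` fallback used above is derived from VCS metadata that the toolchain embeds into binaries. A self-contained sketch of that mechanism (this is the standard-library API, not code from this file):

```go
package main

import (
	"fmt"
	"runtime/debug"
)

// vcsRevision mirrors the idea behind computedRevision: read the build info
// embedded by `go build` and pick out the vcs.revision setting, if present.
func vcsRevision() string {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		return "unknown"
	}
	for _, s := range info.Settings {
		if s.Key == "vcs.revision" {
			return s.Value
		}
	}
	return "unknown"
}

func main() {
	fmt.Println(vcsRevision())
}
```
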
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
index 0ce7ea4612e112152d00f7b6e6af9846bc890bc7..062a28185637210b25651c132fd1fda200b2e86d 100644
--- a/vendor/github.com/prometheus/procfs/Makefile.common
+++ b/vendor/github.com/prometheus/procfs/Makefile.common
@@ -61,7 +61,7 @@ PROMU_URL     := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.53.3
+GOLANGCI_LINT_VERSION ?= v1.54.2
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
index 13d74e3957154ba2943ba182241d4d81794b2e33..134767d69ac1302da1308af26b1848dabb67d7b8 100644
--- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
+++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
@@ -11,8 +11,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//go:build netbsd || openbsd || solaris || windows || nostatfs
-// +build netbsd openbsd solaris windows nostatfs
+//go:build !freebsd && !linux
+// +build !freebsd,!linux
 
 package procfs
 
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go
index bee151445a0538dc7e0a5c04ad0c053774d59ced..80df79c31930ad6d4929520d973bb7b3b8b02b2a 100644
--- a/vendor/github.com/prometheus/procfs/fs_statfs_type.go
+++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go
@@ -11,8 +11,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//go:build !netbsd && !openbsd && !solaris && !windows && !nostatfs
-// +build !netbsd,!openbsd,!solaris,!windows,!nostatfs
+//go:build freebsd || linux
+// +build freebsd linux
 
 package procfs
 
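
Note the inversion across these two files: the typed statfs implementation used to be compiled everywhere except a deny-list of platforms (plus the `nostatfs` tag), whereas it is now opt-in for freebsd and linux only, with every other GOOS falling back to the no-op variant. A toy, hypothetical single-file illustration of the allow-list constraint:

```go
//go:build freebsd || linux

// This file only compiles on freebsd or linux; a sibling file guarded by
// `!freebsd && !linux` would provide the fallback, mirroring the
// fs_statfs_type.go / fs_statfs_notype.go pair above.
package main

import "fmt"

func main() {
	fmt.Println("typed statfs support compiled in")
}
```
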
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
index 852c8c4a0e4639ca4c90c3f1fec7fb03d132e916..9d8af6db7425795d2fe89ced504d711971a4bb4d 100644
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -44,6 +44,14 @@ const (
 
 	fieldTransport11TCPLen = 13
 	fieldTransport11UDPLen = 10
+
+	// kernel version >= 4.14 MaxLen
+	// See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393
+	fieldTransport11RDMAMaxLen = 28
+
+	// kernel version <= 4.2 MinLen
+	// See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331
+	fieldTransport11RDMAMinLen = 20
 )
 
 // A Mount is a device mount parsed from /proc/[pid]/mountstats.
@@ -233,6 +241,33 @@ type NFSTransportStats struct {
 	// A running counter, incremented on each request as the current size of the
 	// pending queue.
 	CumulativePendingQueue uint64
+
+	// Stats below only available with stat version 1.1.
+	// Transport over RDMA
+
+	// accessed when sending a call
+	ReadChunkCount   uint64
+	WriteChunkCount  uint64
+	ReplyChunkCount  uint64
+	TotalRdmaRequest uint64
+
+	// rarely accessed error counters
+	PullupCopyCount      uint64
+	HardwayRegisterCount uint64
+	FailedMarshalCount   uint64
+	BadReplyCount        uint64
+	MrsRecovered         uint64
+	MrsOrphaned          uint64
+	MrsAllocated         uint64
+	EmptySendctxQ        uint64
+
+	// accessed when receiving a reply
+	TotalRdmaReply    uint64
+	FixupCopyCount    uint64
+	ReplyWaitsForSend uint64
+	LocalInvNeeded    uint64
+	NomsgCallCount    uint64
+	BcallCount        uint64
 }
 
 // parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
@@ -587,14 +622,17 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
 			expectedLength = fieldTransport11TCPLen
 		} else if protocol == "udp" {
 			expectedLength = fieldTransport11UDPLen
+		} else if protocol == "rdma" {
+			expectedLength = fieldTransport11RDMAMinLen
 		} else {
 			return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss)
 		}
-		if len(ss) != expectedLength {
-			return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v", ErrFileParse, ss)
+		if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) ||
+			(protocol == "rdma" && len(ss) < expectedLength) {
+			return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol)
 		}
 	default:
-		return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q", ErrFileParse, statVersion)
+		return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
 	}
 
 	// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
@@ -604,7 +642,9 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
 	// Note: slice length must be set to length of v1.1 stats to avoid a panic when
 	// only v1.0 stats are present.
 	// See: https://github.com/prometheus/node_exporter/issues/571.
-	ns := make([]uint64, fieldTransport11TCPLen)
+	//
+	// Note: the NFS-over-RDMA slice length is fieldTransport11RDMAMaxLen+3.
+	ns := make([]uint64, fieldTransport11RDMAMaxLen+3)
 	for i, s := range ss {
 		n, err := strconv.ParseUint(s, 10, 64)
 		if err != nil {
@@ -622,9 +662,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
 	// we set them to 0 here.
 	if protocol == "udp" {
 		ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
+	} else if protocol == "tcp" {
+		ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...)
+	} else if protocol == "rdma" {
+		ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...)
 	}
 
 	return &NFSTransportStats{
+		// NFS xprt over tcp or udp
 		Protocol:                 protocol,
 		Port:                     ns[0],
 		Bind:                     ns[1],
@@ -636,8 +681,32 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
 		BadTransactionIDs:        ns[7],
 		CumulativeActiveRequests: ns[8],
 		CumulativeBacklog:        ns[9],
-		MaximumRPCSlotsUsed:      ns[10],
-		CumulativeSendingQueue:   ns[11],
-		CumulativePendingQueue:   ns[12],
+
+		// NFS xprt over tcp or udp
+		// And statVersion 1.1
+		MaximumRPCSlotsUsed:    ns[10],
+		CumulativeSendingQueue: ns[11],
+		CumulativePendingQueue: ns[12],
+
+		// NFS xprt over rdma
+		// And stat Version 1.1
+		ReadChunkCount:       ns[13],
+		WriteChunkCount:      ns[14],
+		ReplyChunkCount:      ns[15],
+		TotalRdmaRequest:     ns[16],
+		PullupCopyCount:      ns[17],
+		HardwayRegisterCount: ns[18],
+		FailedMarshalCount:   ns[19],
+		BadReplyCount:        ns[20],
+		MrsRecovered:         ns[21],
+		MrsOrphaned:          ns[22],
+		MrsAllocated:         ns[23],
+		EmptySendctxQ:        ns[24],
+		TotalRdmaReply:       ns[25],
+		FixupCopyCount:       ns[26],
+		ReplyWaitsForSend:    ns[27],
+		LocalInvNeeded:       ns[28],
+		NomsgCallCount:       ns[29],
+		BcallCount:           ns[30],
 	}, nil
 }
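
A usage sketch for the new RDMA counters (field names taken from the hunk above; the mount-listing API is existing procfs surface). For tcp/udp transports the RDMA fields simply stay zero, thanks to the padding logic in `parseNFSTransportStats`:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	mounts, err := p.MountStats() // parses /proc/self/mountstats
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		nfs, ok := m.Stats.(*procfs.MountStatsNFS)
		if !ok {
			continue // not an NFS mount
		}
		t := nfs.Transport
		fmt.Printf("%s proto=%s rdma_requests=%d rdma_replies=%d\n",
			m.Device, t.Protocol, t.TotalRdmaRequest, t.TotalRdmaReply)
	}
}
```
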
diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
index 4b7933e4f9724a37396aa9f34598938b14cab229..fa761b35295edea9173976a677f27dba1b5c9cb7 100644
--- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go
+++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
@@ -26,6 +26,7 @@ var (
 	rPos          = regexp.MustCompile(`^pos:\s+(\d+)$`)
 	rFlags        = regexp.MustCompile(`^flags:\s+(\d+)$`)
 	rMntID        = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
+	rIno          = regexp.MustCompile(`^ino:\s+(\d+)$`)
 	rInotify      = regexp.MustCompile(`^inotify`)
 	rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`)
 )
@@ -40,6 +41,8 @@ type ProcFDInfo struct {
 	Flags string
 	// Mount point ID
 	MntID string
+	// Inode number
+	Ino string
 	// List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
 	InotifyInfos []InotifyInfo
 }
@@ -51,7 +54,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
 		return nil, err
 	}
 
-	var text, pos, flags, mntid string
+	var text, pos, flags, mntid, ino string
 	var inotify []InotifyInfo
 
 	scanner := bufio.NewScanner(bytes.NewReader(data))
@@ -63,6 +66,8 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
 			flags = rFlags.FindStringSubmatch(text)[1]
 		} else if rMntID.MatchString(text) {
 			mntid = rMntID.FindStringSubmatch(text)[1]
+		} else if rIno.MatchString(text) {
+			ino = rIno.FindStringSubmatch(text)[1]
 		} else if rInotify.MatchString(text) {
 			newInotify, err := parseInotifyInfo(text)
 			if err != nil {
@@ -77,6 +82,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
 		Pos:          pos,
 		Flags:        flags,
 		MntID:        mntid,
+		Ino:          ino,
 		InotifyInfos: inotify,
 	}
 
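
With the `ino:` line now captured, callers get the inode of an open descriptor alongside the existing fields. A short sketch against this patched procfs:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	info, err := p.FDInfo("0") // the fd is passed as a string, per the API above
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("fd 0: pos=%s flags=%s mnt_id=%s ino=%s\n",
		info.Pos, info.Flags, info.MntID, info.Ino)
}
```
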
diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go
index 727549a13f8f992e87978da8354c4168678684b3..7e75c286b5b486748e3fe2968a68979f887bac57 100644
--- a/vendor/github.com/prometheus/procfs/proc_maps.go
+++ b/vendor/github.com/prometheus/procfs/proc_maps.go
@@ -63,17 +63,17 @@ type ProcMap struct {
 // parseDevice parses the device token of a line and converts it to a dev_t
 // (mkdev) like structure.
 func parseDevice(s string) (uint64, error) {
-	toks := strings.Split(s, ":")
-	if len(toks) < 2 {
-		return 0, fmt.Errorf("%w: unexpected number of fields, expected: 2, got: %q", ErrFileParse, len(toks))
+	i := strings.Index(s, ":")
+	if i == -1 {
+		return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s)
 	}
 
-	major, err := strconv.ParseUint(toks[0], 16, 0)
+	major, err := strconv.ParseUint(s[0:i], 16, 0)
 	if err != nil {
 		return 0, err
 	}
 
-	minor, err := strconv.ParseUint(toks[1], 16, 0)
+	minor, err := strconv.ParseUint(s[i+1:], 16, 0)
 	if err != nil {
 		return 0, err
 	}
@@ -93,17 +93,17 @@ func parseAddress(s string) (uintptr, error) {
 
 // parseAddresses parses the start-end address.
 func parseAddresses(s string) (uintptr, uintptr, error) {
-	toks := strings.Split(s, "-")
-	if len(toks) < 2 {
-		return 0, 0, fmt.Errorf("%w: invalid address", ErrFileParse)
+	idx := strings.Index(s, "-")
+	if idx == -1 {
+		return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s)
 	}
 
-	saddr, err := parseAddress(toks[0])
+	saddr, err := parseAddress(s[0:idx])
 	if err != nil {
 		return 0, 0, err
 	}
 
-	eaddr, err := parseAddress(toks[1])
+	eaddr, err := parseAddress(s[idx+1:])
 	if err != nil {
 		return 0, 0, err
 	}
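
Both rewrites above replace `strings.Split` with a single `strings.Index`, avoiding one slice allocation per parsed line on this hot path. A standalone sketch of the same idea using `strings.Cut`, the equivalent modern idiom (hypothetical demo code, not from the repo):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseMajorMinor splits a "major:minor" token without allocating a slice,
// matching the behavior of the Index-based parseDevice above.
func parseMajorMinor(s string) (uint64, uint64, error) {
	majStr, minStr, ok := strings.Cut(s, ":")
	if !ok {
		return 0, 0, fmt.Errorf("expected separator `:` in %s", s)
	}
	major, err := strconv.ParseUint(majStr, 16, 0)
	if err != nil {
		return 0, 0, err
	}
	minor, err := strconv.ParseUint(minStr, 16, 0)
	if err != nil {
		return 0, 0, err
	}
	return major, minor, nil
}

func main() {
	fmt.Println(parseMajorMinor("fd:01"))
}
```
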
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
index c055d075db00db95cdd180b9b00240de782fd82b..46307f5721e2c554d708cb0fa5cf258ecde25534 100644
--- a/vendor/github.com/prometheus/procfs/proc_status.go
+++ b/vendor/github.com/prometheus/procfs/proc_status.go
@@ -23,7 +23,7 @@ import (
 )
 
 // ProcStatus provides status information about the process,
-// read from /proc/[pid]/stat.
+// read from /proc/[pid]/status.
 type ProcStatus struct {
 	// The process ID.
 	PID int
@@ -32,6 +32,8 @@ type ProcStatus struct {
 
 	// Thread group ID.
 	TGID int
+	// List of the process's PIDs in each PID namespace (NSpid).
+	NSpids []uint64
 
 	// Peak virtual memory size.
 	VmPeak uint64 // nolint:revive
@@ -127,6 +129,8 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
 		copy(s.UIDs[:], strings.Split(vString, "\t"))
 	case "Gid":
 		copy(s.GIDs[:], strings.Split(vString, "\t"))
+	case "NSpid":
+		s.NSpids = calcNSPidsList(vString)
 	case "VmPeak":
 		s.VmPeak = vUintBytes
 	case "VmSize":
@@ -200,3 +204,18 @@ func calcCpusAllowedList(cpuString string) []uint64 {
 	sort.Slice(g, func(i, j int) bool { return g[i] < g[j] })
 	return g
 }
+
+func calcNSPidsList(nspidsString string) []uint64 {
+	s := strings.Split(nspidsString, " ")
+	var nspids []uint64
+
+	for _, nspid := range s {
+		nspid, _ := strconv.ParseUint(nspid, 10, 64)
+		if nspid == 0 {
+			continue
+		}
+		nspids = append(nspids, nspid)
+	}
+
+	return nspids
+}
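
The `NSpid` line in `/proc/[pid]/status` lists the process's PID in each nested PID namespace, outermost first, which is what the new `NSpids` field surfaces. A usage sketch against this patched procfs:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	status, err := p.NewStatus() // parses /proc/self/status
	if err != nil {
		log.Fatal(err)
	}
	// In a container this typically has two entries: the host PID and the
	// PID inside the container's namespace.
	fmt.Println("pid in each namespace:", status.NSpids)
}
```
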
diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore
new file mode 100644
index 0000000000000000000000000000000000000000..ae6a3bcf12c8b1255a282859ad4ae0bfdecb6047
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.codespellignore
@@ -0,0 +1,5 @@
+ot
+fo
+te
+collison
+consequentially
diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc
new file mode 100644
index 0000000000000000000000000000000000000000..4afbb1fb3bd70fe4d841bd50ebac6134e15819e7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.codespellrc
@@ -0,0 +1,10 @@
+# https://github.com/codespell-project/codespell
+[codespell]
+builtin = clear,rare,informal
+check-filenames =
+check-hidden =
+ignore-words = .codespellignore
+interactive = 1
+skip = .git,go.mod,go.sum,semconv,venv,.tools
+uri-ignore-words-list = *
+write =
diff --git a/vendor/go.opentelemetry.io/otel/.gitattributes b/vendor/go.opentelemetry.io/otel/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..314766e91bf544895bec1c32b541ecf566467467
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitattributes
@@ -0,0 +1,3 @@
+* text=auto eol=lf
+*.{cmd,[cC][mM][dD]} text eol=crlf
+*.{bat,[bB][aA][tT]} text eol=crlf
diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..895c7664beb5c6cdd4535343d29b05b4bff21a97
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitignore
@@ -0,0 +1,22 @@
+.DS_Store
+Thumbs.db
+
+.tools/
+venv/
+.idea/
+.vscode/
+*.iml
+*.so
+coverage.*
+go.work
+go.work.sum
+
+gen/
+
+/example/dice/dice
+/example/namedtracer/namedtracer
+/example/otel-collector/otel-collector
+/example/opencensus/opencensus
+/example/passthrough/passthrough
+/example/prometheus/prometheus
+/example/zipkin/zipkin
diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules
new file mode 100644
index 0000000000000000000000000000000000000000..38a1f56982badf8ca8ac86d915789ecca5cf8120
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "opentelemetry-proto"]
+	path = exporters/otlp/internal/opentelemetry-proto
+	url = https://github.com/open-telemetry/opentelemetry-proto
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a62511f382e2fd1c91d9c8f6b3e70abb71be48df
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -0,0 +1,296 @@
+# See https://github.com/golangci/golangci-lint#config-file
+run:
+  issues-exit-code: 1 #Default
+  tests: true #Default
+
+linters:
+  # Disable everything by default so upgrades do not include new "default
+  # enabled" linters.
+  disable-all: true
+  # Specifically enable linters we want to use.
+  enable:
+    - depguard
+    - errcheck
+    - godot
+    - gofumpt
+    - goimports
+    - gosec
+    - gosimple
+    - govet
+    - ineffassign
+    - misspell
+    - revive
+    - staticcheck
+    - typecheck
+    - unused
+
+issues:
+  # Maximum issues count per one linter.
+  # Set to 0 to disable.
+  # Default: 50
+  # Setting to unlimited so the linter is only run once to debug all issues.
+  max-issues-per-linter: 0
+  # Maximum count of issues with the same text.
+  # Set to 0 to disable.
+  # Default: 3
+  # Setting to unlimited so the linter is only run once to debug all issues.
+  max-same-issues: 0
+  # Excluding configuration per-path, per-linter, per-text and per-source.
+  exclude-rules:
+    # TODO: Having appropriate comments for exported objects helps development,
+    # even for objects in internal packages. Appropriate comments for all
+    # exported objects should be added and this exclusion removed.
+    - path: '.*internal/.*'
+      text: "exported (method|function|type|const) (.+) should have comment or be unexported"
+      linters:
+        - revive
+    # Yes, they are, but it's okay in a test.
+    - path: _test\.go
+      text: "exported func.*returns unexported type.*which can be annoying to use"
+      linters:
+        - revive
+    # Example test functions should be treated like main.
+    - path: example.*_test\.go
+      text: "calls to (.+) only in main[(][)] or init[(][)] functions"
+      linters:
+        - revive
+    # It's okay to not run gosec in a test.
+    - path: _test\.go
+      linters:
+        - gosec
+    # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
+    # as we commonly use it in tests and examples.
+    - text: "G404:"
+      linters:
+        - gosec
+    # Ignoring gosec G402: TLS MinVersion too low
+    # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well.
+    - text: "G402: TLS MinVersion too low."
+      linters:
+        - gosec
+  include:
+    # revive exported should have comment or be unexported.
+    - EXC0012
+    # revive package comment should be of the form ...
+    - EXC0013
+
+linters-settings:
+  depguard:
+    rules:
+      non-tests:
+        files:
+          - "!$test"
+          - "!**/*test/*.go"
+          - "!**/internal/matchers/*.go"
+        deny:
+          - pkg: "testing"
+          - pkg: "github.com/stretchr/testify"
+          - pkg: "crypto/md5"
+          - pkg: "crypto/sha1"
+          - pkg: "crypto/**/pkix"
+      otlp-internal:
+        files:
+          - "!**/exporters/otlp/internal/**/*.go"
+        deny:
+          - pkg: "go.opentelemetry.io/otel/exporters/otlp/internal"
+            desc: Do not use cross-module internal packages.
+      otlptrace-internal:
+        files:
+          - "!**/exporters/otlp/otlptrace/*.go"
+          - "!**/exporters/otlp/otlptrace/internal/**.go"
+        deny:
+          - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal"
+            desc: Do not use cross-module internal packages.
+      otlpmetric-internal:
+        files:
+          - "!**/exporters/otlp/otlpmetric/internal/*.go"
+          - "!**/exporters/otlp/otlpmetric/internal/**/*.go"
+        deny:
+          - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal"
+            desc: Do not use cross-module internal packages.
+      otel-internal:
+        files:
+          - "**/sdk/*.go"
+          - "**/sdk/**/*.go"
+          - "**/exporters/*.go"
+          - "**/exporters/**/*.go"
+          - "**/schema/*.go"
+          - "**/schema/**/*.go"
+          - "**/metric/*.go"
+          - "**/metric/**/*.go"
+          - "**/bridge/*.go"
+          - "**/bridge/**/*.go"
+          - "**/example/*.go"
+          - "**/example/**/*.go"
+          - "**/trace/*.go"
+          - "**/trace/**/*.go"
+        deny:
+          - pkg: "go.opentelemetry.io/otel/internal$"
+            desc: Do not use cross-module internal packages.
+          - pkg: "go.opentelemetry.io/otel/internal/attribute"
+            desc: Do not use cross-module internal packages.
+          - pkg: "go.opentelemetry.io/otel/internal/internaltest"
+            desc: Do not use cross-module internal packages.
+          - pkg: "go.opentelemetry.io/otel/internal/matchers"
+            desc: Do not use cross-module internal packages.
+  godot:
+    exclude:
+      # Exclude links.
+      - '^ *\[[^]]+\]:'
+      # Exclude sentence fragments for lists.
+      - '^[ ]*[-•]'
+      # Exclude sentences prefixing a list.
+      - ':$'
+  goimports:
+    local-prefixes: go.opentelemetry.io
+  misspell:
+    locale: US
+    ignore-words:
+      - cancelled
+  revive:
+    # Sets the default failure confidence.
+    # This means that linting errors with less than 0.8 confidence will be ignored.
+    # Default: 0.8
+    confidence: 0.01
+    rules:
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports
+      - name: blank-imports
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr
+      - name: bool-literal-in-expr
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr
+      - name: constant-logical-expr
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument
+      # TODO (#3372) re-enable linter when it is compatible. https://github.com/golangci/golangci-lint/issues/3280
+      - name: context-as-argument
+        disabled: true
+        arguments:
+          allowTypesBefore: "*testing.T"
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type
+      - name: context-keys-type
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit
+      - name: deep-exit
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer
+      - name: defer
+        disabled: false
+        arguments:
+          - ["call-chain", "loop"]
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports
+      - name: dot-imports
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports
+      - name: duplicated-imports
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return
+      - name: early-return
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block
+      - name: empty-block
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines
+      - name: empty-lines
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming
+      - name: error-naming
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return
+      - name: error-return
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings
+      - name: error-strings
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf
+      - name: errorf
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported
+      - name: exported
+        disabled: false
+        arguments:
+          - "sayRepetitiveInsteadOfStutters"
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter
+      - name: flag-parameter
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches
+      - name: identical-branches
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return
+      - name: if-return
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement
+      - name: increment-decrement
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow
+      - name: indent-error-flow
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing
+      - name: import-shadowing
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments
+      - name: package-comments
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range
+      - name: range
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure
+      - name: range-val-in-closure
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address
+      - name: range-val-address
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id
+      - name: redefines-builtin-id
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format
+      - name: string-format
+        disabled: false
+        arguments:
+          - - panic
+            - '/^[^\n]*$/'
+            - must not contain line breaks
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag
+      - name: struct-tag
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else
+      - name: superfluous-else
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal
+      - name: time-equal
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming
+      - name: var-naming
+        disabled: false
+        arguments:
+          - ["ID"] # AllowList
+          - ["Otel", "Aws", "Gcp"] # DenyList
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration
+      - name: var-declaration
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion
+      - name: unconditional-recursion
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return
+      - name: unexported-return
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error
+      - name: unhandled-error
+        disabled: false
+        arguments:
+          - "fmt.Fprint"
+          - "fmt.Fprintf"
+          - "fmt.Fprintln"
+          - "fmt.Print"
+          - "fmt.Printf"
+          - "fmt.Println"
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt
+      - name: unnecessary-stmt
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break
+      - name: useless-break
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value
+      - name: waitgroup-by-value
+        disabled: false
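
For context on the `depguard` rules configured above: the `non-tests` rule denies a handful of packages outside test files. A hypothetical snippet that this configuration would flag:

```go
// Not a _test.go file, so the depguard non-tests rule applies.
package server

import "testing" // depguard: "testing" is on the deny list for non-test files

func mustHelper(t *testing.T) {
	t.Helper()
}
```
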
diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore
new file mode 100644
index 0000000000000000000000000000000000000000..40d62fa2eb830582103a310ad2c64279779ee049
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.lycheeignore
@@ -0,0 +1,6 @@
+http://localhost
+http://jaeger-collector
+https://github.com/open-telemetry/opentelemetry-go/milestone/
+https://github.com/open-telemetry/opentelemetry-go/projects
+file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries
+file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual
diff --git a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3202496c357e894e10781e48ff3fbbabf05aa78a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml
@@ -0,0 +1,29 @@
+# Default state for all rules
+default: true
+
+# ul-style
+MD004: false
+
+# hard-tabs
+MD010: false
+
+# line-length
+MD013: false
+
+# no-duplicate-header
+MD024:
+  siblings_only: true
+
+#single-title
+MD025: false
+
+# ol-prefix
+MD029:
+  style: ordered
+
+# no-inline-html
+MD033: false
+
+# fenced-code-language
+MD040: false
+
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
new file mode 100644
index 0000000000000000000000000000000000000000..5422ca7dccd70cdcd6e5dac7a4fadc8c92bdd88a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -0,0 +1,2916 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
+
+This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+
+## [1.23.1] 2024-02-07
+
+### Fixed
+
+- Register all callbacks passed during observable instrument creation, instead of registering only the last one multiple times, in `go.opentelemetry.io/otel/sdk/metric`. (#4888)
+
+## [1.23.0] 2024-02-06
+
+This release contains the first stable, `v1`, release of the following modules:
+
+- `go.opentelemetry.io/otel/bridge/opencensus`
+- `go.opentelemetry.io/otel/bridge/opencensus/test`
+- `go.opentelemetry.io/otel/example/opencensus`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
+
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Add `WithEndpointURL` option to the `exporters/otlp/otlpmetric/otlpmetricgrpc`, `exporters/otlp/otlpmetric/otlpmetrichttp`, `exporters/otlp/otlptrace/otlptracegrpc` and `exporters/otlp/otlptrace/otlptracehttp` packages. (#4808)
+- Experimental exemplar exporting is added to the metric SDK.
+  See [metric documentation](./sdk/metric/EXPERIMENTAL.md#exemplars) for more information about this feature and how to enable it. (#4871)
+- `ErrSchemaURLConflict` is added to `go.opentelemetry.io/otel/sdk/resource`.
+  This error is returned when a merge of two `Resource`s with different (non-empty) schema URL is attempted. (#4876)
+
+### Changed
+
+- The `Merge` and `New` functions in `go.opentelemetry.io/otel/sdk/resource` now return a partial result if there is a schema URL merge conflict.
+  Instead of returning `nil` when two `Resource`s with different (non-empty) schema URLs are merged, the merged `Resource`, along with the new `ErrSchemaURLConflict` error, is returned.
+  It is up to the user to decide if they want to use the returned `Resource` or not.
+  It may have desired attributes overwritten or include stale semantic conventions. (#4876)
+
+### Fixed
+
+- Fix `ContainerID` resource detection on systemd when cgroup path has a colon. (#4449)
+- Fix `go.opentelemetry.io/otel/sdk/metric` to cache instruments to avoid leaking memory when the same instrument is created multiple times. (#4820)
+- Fix missing `Min` and `Max` values for `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` by introducing `MarshalText` and `MarshalJSON` for the `Extrema` type in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4827)
+
+## [1.23.0-rc.1] 2024-01-18
+
+This is a release candidate for the v1.23.0 release.
+That release is expected to include the `v1` release of the following modules:
+
+- `go.opentelemetry.io/otel/bridge/opencensus`
+- `go.opentelemetry.io/otel/bridge/opencensus/test`
+- `go.opentelemetry.io/otel/example/opencensus`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
+
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+## [1.22.0/0.45.0] 2024-01-17
+
+### Added
+
+- The `go.opentelemetry.io/otel/semconv/v1.22.0` package.
+  The package contains semantic conventions from the `v1.22.0` version of the OpenTelemetry Semantic Conventions. (#4735)
+- The `go.opentelemetry.io/otel/semconv/v1.23.0` package.
+  The package contains semantic conventions from the `v1.23.0` version of the OpenTelemetry Semantic Conventions. (#4746)
+- The `go.opentelemetry.io/otel/semconv/v1.23.1` package.
+  The package contains semantic conventions from the `v1.23.1` version of the OpenTelemetry Semantic Conventions. (#4749)
+- The `go.opentelemetry.io/otel/semconv/v1.24.0` package.
+  The package contains semantic conventions from the `v1.24.0` version of the OpenTelemetry Semantic Conventions. (#4770)
+- Add `WithResourceAsConstantLabels` option to apply resource attributes for every metric emitted by the Prometheus exporter. (#4733)
+- Experimental cardinality limiting is added to the metric SDK.
+  See [metric documentation](./sdk/metric/EXPERIMENTAL.md#cardinality-limit) for more information about this feature and how to enable it. (#4457)
+- Add `NewMemberRaw` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage`. (#4804)
+
+### Changed
+
+- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.24.0`. (#4754)
+- Update transformations in `go.opentelemetry.io/otel/exporters/zipkin` to follow `v1.24.0` version of the OpenTelemetry specification. (#4754)
+- Record synchronous measurements when the passed context is canceled instead of dropping in `go.opentelemetry.io/otel/sdk/metric`.
+  If you do not want to make a measurement when the context is cancelled, you need to handle it yourself (e.g. `if ctx.Err() != nil`). (#4671)
+- Improve `go.opentelemetry.io/otel/trace.TraceState`'s performance. (#4722)
+- Improve `go.opentelemetry.io/otel/propagation.TraceContext`'s performance. (#4721)
+- Improve `go.opentelemetry.io/otel/baggage` performance. (#4743)
+- Improve performance of the `(*Set).Filter` method in `go.opentelemetry.io/otel/attribute` when the passed filter does not filter out any attributes from the set. (#4774)
+- `Member.String` in `go.opentelemetry.io/otel/baggage` percent-encodes only when necessary. (#4775)
+- Improve `go.opentelemetry.io/otel/trace.Span`'s performance when adding multiple attributes. (#4818)
+- `Property.Value` in `go.opentelemetry.io/otel/baggage` now returns a raw string instead of a percent-encoded value. (#4804)
+
+### Fixed
+
+- Fix `Parse` in `go.opentelemetry.io/otel/baggage` to validate member value before percent-decoding. (#4755)
+- Fix whitespace encoding of `Member.String` in `go.opentelemetry.io/otel/baggage`. (#4756)
+- Fix observable not registered error when the asynchronous instrument has a drop aggregation in `go.opentelemetry.io/otel/sdk/metric`. (#4772)
+- Fix baggage item key so that it is not canonicalized in `go.opentelemetry.io/otel/bridge/opentracing`. (#4776)
+- Fix `go.opentelemetry.io/otel/bridge/opentracing` to properly handle baggage values that require escaping during propagation. (#4804)
+- Fix a bug where using multiple readers resulted in incorrect asynchronous counter values in `go.opentelemetry.io/otel/sdk/metric`. (#4742)
+
+## [1.21.0/0.44.0] 2023-11-16
+
+### Removed
+
+- Remove the deprecated `go.opentelemetry.io/otel/bridge/opencensus.NewTracer`. (#4706)
+- Remove the deprecated `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` module. (#4707)
+- Remove the deprecated `go.opentelemetry.io/otel/example/view` module. (#4708)
+- Remove the deprecated `go.opentelemetry.io/otel/example/fib` module. (#4723)
+
+### Fixed
+
+- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4719)
+- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4719)
+
+## [1.20.0/0.43.0] 2023-11-10
+
+This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this.
+
+### Added
+
+- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567)
+- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584)
+- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620)
+- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620)
+- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644)
+- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649)
+- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments. (#4603)
+- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660)
+- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660)
+- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4622)
+- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585)
+- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605)
+- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668)
+
+### Deprecated
+
+- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567)
+- Deprecate the `go.opentelemetry.io/otel/example/fib` package in favor of `go.opentelemetry.io/otel/example/dice`. (#4618)
+- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`.
+  Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620)
+- Deprecate `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649)
+- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693)
+
+### Changed
+
+- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583)
+- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type.
+  This extends the `TracerProvider` interface and is a breaking change for any existing implementation.
+  Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+  See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type.
+  This extends the `Tracer` interface and is a breaking change for any existing implementation.
+  Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+  See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type.
+  This extends the `Span` interface and is a breaking change for any existing implementation.
+  Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+  See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
+- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670)
+- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670)
+- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669)
+- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669)
+- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679)
+- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4679)
+
+### Fixed
+
+- Fix improper parsing of characters such as `+`, `/` by `Parse` in `go.opentelemetry.io/otel/baggage` as they were rendered as whitespace. (#4667)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource` as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracegrpc` as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracehttp` as they were rendered as whitespace. (#4699)
+- In `go.opentelemetry.io/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648)
+- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695)
+- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695)
+
+## [1.19.0/0.42.0/0.0.7] 2023-09-28
+
+This release contains the first stable release of the OpenTelemetry Go [metric SDK].
+Our project stability guarantees now apply to the `go.opentelemetry.io/otel/sdk/metric` package.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Add the "Roll the dice" getting started application example in `go.opentelemetry.io/otel/example/dice`. (#4539)
+- The `WithWriter` and `WithPrettyPrint` options to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set a custom `io.Writer`, and allow displaying the output in human-readable JSON. (#4507)
+
+### Changed
+
+- Allow '/' characters in metric instrument names. (#4501)
+- The exporter in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` does not prettify its output by default anymore. (#4507)
+- Upgrade `gopkg.io/yaml` from `v2` to `v3` in `go.opentelemetry.io/otel/schema`. (#4535)
+
+### Fixed
+
+- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
+
+### Removed
+
+- Remove `"go.opentelemetry.io/otel/bridge/opencensus".NewMetricExporter`, which is replaced by `NewMetricProducer`. (#4566)
+
+## [1.19.0-rc.1/0.42.0-rc.1] 2023-09-14
+
+This is a release candidate for the v1.19.0/v0.42.0 release.
+That release is expected to include the `v1` release of the OpenTelemetry Go metric SDK and will provide stability guarantees of that SDK.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Changed
+
+- Allow '/' characters in metric instrument names. (#4501)
+
+### Fixed
+
+- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
+
+## [1.18.0/0.41.0/0.0.6] 2023-09-12
+
+This release drops the compatibility guarantee of [Go 1.19].
+
+### Added
+
+- Add `WithProducer` option in `go.opentelemetry.io/otel/exporters/prometheus` to restore the ability to register producers on the Prometheus exporter's manual reader. (#4473)
+- Add `IgnoreValue` option in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest` to allow ignoring values when comparing metrics. (#4447)
+
+### Changed
+
+- Use a `TestingT` interface instead of `*testing.T` struct in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#4483)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` was deprecated in `v0.35.0` (#3541).
+  The deprecation notice format for the function has been corrected to trigger Go documentation and build tooling. (#4470)
+
+### Removed
+
+- Removed the deprecated `go.opentelemetry.io/otel/exporters/jaeger` package. (#4467)
+- Removed the deprecated `go.opentelemetry.io/otel/example/jaeger` package. (#4467)
+- Removed the deprecated `go.opentelemetry.io/otel/sdk/metric/aggregation` package. (#4468)
+- Removed the deprecated internal packages in `go.opentelemetry.io/otel/exporters/otlp` and its sub-packages. (#4469)
+- Dropped guaranteed support for versions of Go less than 1.20. (#4481)
+
+## [1.17.0/0.40.0/0.0.5] 2023-08-28
+
+### Added
+
+- Export the `ManualReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
+- Export the `PeriodicReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
+- Add support for exponential histogram aggregations.
+  A histogram can be configured as an exponential histogram using a view with `"go.opentelemetry.io/otel/sdk/metric".ExponentialHistogram` as the aggregation. (#4245)
+- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4272)
+- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4272)
+- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable. (#4287)
+- Add `WithoutCounterSuffixes` option in `go.opentelemetry.io/otel/exporters/prometheus` to disable addition of `_total` suffixes. (#4306)
+- Add info and debug logging to the metric SDK in `go.opentelemetry.io/otel/sdk/metric`. (#4315)
+- The `go.opentelemetry.io/otel/semconv/v1.21.0` package.
+  The package contains semantic conventions from the `v1.21.0` version of the OpenTelemetry Semantic Conventions. (#4362)
+- Accept 201 to 299 HTTP status as success in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4365)
+- Document that the `Temporality` and `Aggregation` methods of `"go.opentelemetry.io/otel/sdk/metric".Exporter` need to be concurrent safe. (#4381)
+- Expand the set of units supported by the Prometheus exporter, and don't add unit suffixes if they are already present in `go.opentelemetry.io/otel/exporters/prometheus`. (#4374)
+- Move the `Aggregation` interface and its implementations from `go.opentelemetry.io/otel/sdk/metric/aggregation` to `go.opentelemetry.io/otel/sdk/metric`. (#4435)
+- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` environment variable. (#4437)
+- Add the `NewAllowKeysFilter` and `NewDenyKeysFilter` functions to `go.opentelemetry.io/otel/attribute` to allow convenient creation of allow-keys and deny-keys filters. (#4444)
+- Support Go 1.21. (#4463)
+
+### Changed
+
+- Starting from `v1.21.0` of semantic conventions, `go.opentelemetry.io/otel/semconv/{version}/httpconv` and `go.opentelemetry.io/otel/semconv/{version}/netconv` packages will no longer be published. (#4145)
+- Log duplicate instrument conflict at a warning level instead of info in `go.opentelemetry.io/otel/sdk/metric`. (#4202)
+- Return an error on the creation of new instruments in `go.opentelemetry.io/otel/sdk/metric` if their name doesn't pass regexp validation. (#4210)
+- `NewManualReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*ManualReader` instead of `Reader`. (#4244)
+- `NewPeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*PeriodicReader` instead of `Reader`. (#4244)
+- Count the Collect time in the `PeriodicReader` timeout in `go.opentelemetry.io/otel/sdk/metric`. (#4221)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- If an attribute set is omitted from an async callback, the previous value will no longer be exported in `go.opentelemetry.io/otel/sdk/metric`. (#4290)
+- If an attribute set is observed multiple times in an async callback in `go.opentelemetry.io/otel/sdk/metric`, the values will be summed instead of the last observation winning. (#4289)
+- Allow the explicit bucket histogram aggregation to be used for the up-down counter, observable counter, observable up-down counter, and observable gauge in the `go.opentelemetry.io/otel/sdk/metric` package. (#4332)
+- Restrict `Meter`s in `go.opentelemetry.io/otel/sdk/metric` to only register and collect instruments it created. (#4333)
+- `PeriodicReader.Shutdown` and `PeriodicReader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric` now apply the periodic reader's timeout to the operation if the user provided context does not contain a deadline. (#4356, #4377)
+- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.21.0`. (#4408)
+- Increase instrument name maximum length from 63 to 255 characters in `go.opentelemetry.io/otel/sdk/metric`. (#4434)
+- Add `go.opentelemetry.io/otel/sdk/metric.WithProducer` as an `Option` for `"go.opentelemetry.io/otel/sdk/metric".NewManualReader` and `"go.opentelemetry.io/otel/sdk/metric".NewPeriodicReader`. (#4346)
+
+### Removed
+
+- Remove `Reader.RegisterProducer` in `go.opentelemetry.io/otel/metric`.
+  Use the added `WithProducer` option instead. (#4346)
+- Remove `Reader.ForceFlush` in `go.opentelemetry.io/otel/metric`.
+  Notice that `PeriodicReader.ForceFlush` is still available. (#4375)
+
+### Fixed
+
+- Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143)
+- Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307)
+- Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. (#4317)
+- Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. (#4337)
+- Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338)
+- The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350)
+- If a `Reader`'s `AggregationSelector` returns `nil` or `DefaultAggregation` the pipeline will use the default aggregation. (#4350)
+- Log a suggested view that fixes instrument conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4349)
+- Fix possible panic, deadlock and race condition in batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353)
+- Improve context cancellation handling in batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4407, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4401, #3846)
+- Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#3925, #4395)
+- Do not append `_total` if the counter already has that suffix for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`. (#4373)
+- Fix resource detection data race in `go.opentelemetry.io/otel/sdk/resource`. (#4409)
+- Use the first-seen instrument name during instrument name conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4428)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/jaeger` package is deprecated.
+  OpenTelemetry dropped support for Jaeger exporter in July 2023.
+  Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`
+  or `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` instead. (#4423)
+- The `go.opentelemetry.io/otel/example/jaeger` package is deprecated. (#4423)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal/envconfig` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal/retry` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregation` package is deprecated.
+  Use the aggregation types added to `go.opentelemetry.io/otel/sdk/metric` instead. (#4435)
+
+## [1.16.0/0.39.0] 2023-05-18
+
+This release contains the first stable release of the OpenTelemetry Go [metric API].
+Our project stability guarantees now apply to the `go.opentelemetry.io/otel/metric` package.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- The `go.opentelemetry.io/otel/semconv/v1.19.0` package.
+  The package contains semantic conventions from the `v1.19.0` version of the OpenTelemetry specification. (#3848)
+- The `go.opentelemetry.io/otel/semconv/v1.20.0` package.
+  The package contains semantic conventions from the `v1.20.0` version of the OpenTelemetry specification. (#4078)
+- The Exponential Histogram data types in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4165)
+- The OTLP metrics exporter now supports the Exponential Histogram data type. (#4222)
+- Fix serialization of `time.Time` zero values in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` packages. (#4271)
+
+### Changed
+
+- Use `strings.Cut()` instead of `strings.SplitN()` for better readability and memory use (see the sketch after this list). (#4049)
+- `MeterProvider` returns noop meters once it has been shut down. (#4154)
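+
+A small sketch of the `strings.Cut` change noted above, with illustrative input:
+
+```go
+package main
+
+import (
+    "fmt"
+    "strings"
+)
+
+func main() {
+    // Before: strings.SplitN allocates a slice for the two halves.
+    parts := strings.SplitN("key=value", "=", 2)
+    fmt.Println(parts[0], parts[1])
+
+    // After: strings.Cut returns both halves directly, which reads more
+    // clearly and avoids the intermediate slice.
+    key, value, ok := strings.Cut("key=value", "=")
+    fmt.Println(key, value, ok)
+}
+```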
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/instrument` package is removed.
+  Use `go.opentelemetry.io/otel/metric` instead. (#4055)
+
+### Fixed
+
+- Fix build for BSD based systems in `go.opentelemetry.io/otel/sdk/resource`. (#4077)
+
+## [1.16.0-rc.1/0.39.0-rc.1] 2023-05-03
+
+This is a release candidate for the v1.16.0/v0.39.0 release.
+That release is expected to include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#4039)
+  - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
+  - Use `GetMeterProvider` for a global `metric.MeterProvider`.
+  - Use `SetMeterProvider` to set the global `metric.MeterProvider`.
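+
+A minimal sketch of these global `MeterProvider` helpers; the SDK provider and instrumentation name are illustrative:
+
+```go
+package main
+
+import (
+    "go.opentelemetry.io/otel"
+    sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+    // Register an SDK MeterProvider as the global MeterProvider.
+    otel.SetMeterProvider(sdkmetric.NewMeterProvider())
+
+    // Obtain a Meter from the global MeterProvider.
+    meter := otel.Meter("example/instrumentation")
+    _ = meter
+
+    // The global MeterProvider itself can also be retrieved.
+    _ = otel.GetMeterProvider()
+}
+```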
+
+### Changed
+
+- Move the `go.opentelemetry.io/otel/metric` module to the `stable-v1` module set.
+  This stages the metric API to be released as a stable module. (#4038)
+
+### Removed
+
+- The `go.opentelemetry.io/otel/metric/global` package is removed.
+  Use `go.opentelemetry.io/otel` instead. (#4039)
+
+## [1.15.1/0.38.1] 2023-05-02
+
+### Fixed
+
+- Remove unused imports from `sdk/resource/host_id_bsd.go` which caused build failures. (#4040, #4041)
+
+## [1.15.0/0.38.0] 2023-04-27
+
+### Added
+
+- The `go.opentelemetry.io/otel/metric/embedded` package. (#3916)
+- The `Version` function to `go.opentelemetry.io/otel/sdk` to return the SDK version. (#3949)
+- Add a `WithNamespace` option to `go.opentelemetry.io/otel/exporters/prometheus` to allow users to prefix metrics with a namespace. (#3970)
+- The following configuration types were added to `go.opentelemetry.io/otel/metric/instrument` to be used in the configuration of measurement methods. (#3971)
+  - The `AddConfig` used to hold configuration for addition measurements
+    - `NewAddConfig` used to create a new `AddConfig`
+    - `AddOption` used to configure an `AddConfig`
+  - The `RecordConfig` used to hold configuration for recorded measurements
+    - `NewRecordConfig` used to create a new `RecordConfig`
+    - `RecordOption` used to configure a `RecordConfig`
+  - The `ObserveConfig` used to hold configuration for observed measurements
+    - `NewObserveConfig` used to create a new `ObserveConfig`
+    - `ObserveOption` used to configure an `ObserveConfig`
+- `WithAttributeSet` and `WithAttributes` are added to `go.opentelemetry.io/otel/metric/instrument`.
+  They return an option used during a measurement that defines the attribute `Set` associated with the measurement (see the sketch after this list). (#3971)
+- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to return the OTLP metrics client version. (#3956)
+- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlptrace` to return the OTLP trace client version. (#3956)
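+
+A minimal sketch of the measurement options above, assuming a `metric.Meter` is already in hand (the instrument and attribute names are illustrative):
+
+```go
+package example
+
+import (
+    "context"
+
+    "go.opentelemetry.io/otel/attribute"
+    "go.opentelemetry.io/otel/metric"
+    "go.opentelemetry.io/otel/metric/instrument"
+)
+
+func recordRequest(ctx context.Context, meter metric.Meter) error {
+    counter, err := meter.Int64Counter("http.requests")
+    if err != nil {
+        return err
+    }
+    // WithAttributes defines the attribute Set for this single measurement.
+    counter.Add(ctx, 1, instrument.WithAttributes(attribute.String("route", "/api")))
+    return nil
+}
+```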
+
+### Changed
+
+- The `Extrema` in `go.opentelemetry.io/otel/sdk/metric/metricdata` is redefined with a generic argument of `[N int64 | float64]`. (#3870)
+- Update all exported interfaces from `go.opentelemetry.io/otel/metric` to embed their corresponding interface from `go.opentelemetry.io/otel/metric/embedded`.
+  This requires implementations to embed the corresponding `embedded` interface, which sets the default behavior for unimplemented methods. (#3916)
+- Move No-Op implementation from `go.opentelemetry.io/otel/metric` into its own package `go.opentelemetry.io/otel/metric/noop`. (#3941)
+  - `metric.NewNoopMeterProvider` is replaced with `noop.NewMeterProvider` (see the sketch after this list)
+- Add all the methods from `"go.opentelemetry.io/otel/trace".SpanContext` to `bridgeSpanContext` by embedding `otel.SpanContext` in `bridgeSpanContext`. (#3966)
+- Wrap `UploadMetrics` errors in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to improve the error message when encountering generic gRPC errors. (#3974)
+- The measurement methods for all instruments in `go.opentelemetry.io/otel/metric/instrument` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971)
+  - The `Int64Counter.Add` method now accepts `...AddOption`
+  - The `Float64Counter.Add` method now accepts `...AddOption`
+  - The `Int64UpDownCounter.Add` method now accepts `...AddOption`
+  - The `Float64UpDownCounter.Add` method now accepts `...AddOption`
+  - The `Int64Histogram.Record` method now accepts `...RecordOption`
+  - The `Float64Histogram.Record` method now accepts `...RecordOption`
+  - The `Int64Observer.Observe` method now accepts `...ObserveOption`
+  - The `Float64Observer.Observe` method now accepts `...ObserveOption`
+- The `Observer` methods in `go.opentelemetry.io/otel/metric` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971)
+  - The `Observer.ObserveInt64` method now accepts `...ObserveOption`
+  - The `Observer.ObserveFloat64` method now accepts `...ObserveOption`
+- Move global metric back to `go.opentelemetry.io/otel/metric/global` from `go.opentelemetry.io/otel`. (#3986)
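+
+A sketch of the relocated no-op implementation named above:
+
+```go
+package example
+
+import "go.opentelemetry.io/otel/metric/noop"
+
+func disabledMeter() {
+    // noop.NewMeterProvider replaces the removed metric.NewNoopMeterProvider.
+    provider := noop.NewMeterProvider()
+    _ = provider.Meter("disabled") // every instrument it creates is a no-op
+}
+```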
+
+### Fixed
+
+- `TracerProvider` allows calling `Tracer()` while it's shutting down.
+  It used to deadlock. (#3924)
+- Use the SDK version for the Telemetry SDK resource detector in `go.opentelemetry.io/otel/sdk/resource`. (#3949)
+- Fix a data race in `SpanProcessor` returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace`. (#3951)
+- Automatically figure out the default aggregation with `aggregation.Default`. (#3967)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/instrument` package is deprecated.
+  Use the equivalent types added to `go.opentelemetry.io/otel/metric` instead. (#4018)
+
+## [1.15.0-rc.2/0.38.0-rc.2] 2023-03-23
+
+This is a release candidate for the v1.15.0/v0.38.0 release.
+That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- The `WithHostID` option to `go.opentelemetry.io/otel/sdk/resource`. (#3812)
+- The `WithoutTimestamps` option to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set all timestamps to zero. (#3828)
+- The new `Exemplar` type is added to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+  Both the `DataPoint` and `HistogramDataPoint` types from that package have a new field of `Exemplars` containing the sampled exemplars for their timeseries. (#3849)
+- Configuration for each metric instrument in `go.opentelemetry.io/otel/sdk/metric/instrument`. (#3895)
+- The internal logging introduces a warning level verbosity equal to `V(1)`. (#3900)
+- Added a log message warning about usage of `SimpleSpanProcessor` in production environments. (#3854)
+
+### Changed
+
+- Optimize memory allocation when creating a new `Set` using `NewSet` or `NewSetWithFiltered` in `go.opentelemetry.io/otel/attribute`. (#3832)
+- Optimize memory allocation when creating new metric instruments in `go.opentelemetry.io/otel/sdk/metric`. (#3832)
+- Avoid creating new objects on all calls to `WithDeferredSetup` and `SkipContextSetup` in OpenTracing bridge. (#3833)
+- The `New` and `Detect` functions from `go.opentelemetry.io/otel/sdk/resource` return errors that wrap underlying errors instead of just containing the underlying error strings. (#3844)
+- Both the `Histogram` and `HistogramDataPoint` are redefined with a generic argument of `[N int64 | float64]` in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#3849)
+- The metric `Export` interface from `go.opentelemetry.io/otel/sdk/metric` accepts a `*ResourceMetrics` instead of `ResourceMetrics`. (#3853)
+- Rename `Asynchronous` to `Observable` in `go.opentelemetry.io/otel/metric/instrument`. (#3892)
+- Rename `Int64ObserverOption` to `Int64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
+- Rename `Float64ObserverOption` to `Float64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
+- The internal logging changes the verbosity level of info to `V(4)` and the verbosity level of debug to `V(8)`. (#3900)
+
+### Fixed
+
+- `TracerProvider` consistently doesn't allow registering a `SpanProcessor` after shutdown. (#3845)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/global` package is removed. (#3829)
+- The unneeded `Synchronous` interface in `go.opentelemetry.io/otel/metric/instrument` was removed. (#3892)
+- The `Float64ObserverConfig` and `NewFloat64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
+  Use the added `float64` instrument configuration instead. (#3895)
+- The `Int64ObserverConfig` and `NewInt64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
+  Use the added `int64` instrument configuration instead. (#3895)
+- The `NewNoopMeter` function in `go.opentelemetry.io/otel/metric`. Use `NewMeterProvider().Meter("")` instead. (#3893)
+
+## [1.15.0-rc.1/0.38.0-rc.1] 2023-03-01
+
+This is a release candidate for the v1.15.0/v0.38.0 release.
+That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+This release drops the compatibility guarantee of [Go 1.18].
+
+### Added
+
+- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#3818)
+  - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
+  - Use `GetMeterProvider` for a global `metric.MeterProvider`.
+  - Use `SetMeterProvider` to set the global `metric.MeterProvider`.
+
+### Changed
+
+- Dropped compatibility testing for [Go 1.18].
+  The project no longer guarantees support for this version of Go. (#3813)
+
+### Fixed
+
+- Handle empty environment variables as if they were not set. (#3764)
+- Clarify that the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823)
+- Fix race conditions in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic. (#3899)
+- Fix sending nil `scopeInfo` to metrics channel in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic in `github.com/prometheus/client_golang/prometheus`. (#3899)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/global` package is deprecated.
+  Use `go.opentelemetry.io/otel` instead. (#3818)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/unit` package is removed. (#3814)
+
+## [1.14.0/0.37.0/0.0.4] 2023-02-27
+
+This release is the last to support [Go 1.18].
+The next release will require at least [Go 1.19].
+
+### Added
+
+- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697)
+- Support [Go 1.20]. (#3693)
+- The `go.opentelemetry.io/otel/semconv/v1.18.0` package.
+  The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. (#3719)
+  - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+    - `OtelScopeNameKey` -> `OTelScopeNameKey`
+    - `OtelScopeVersionKey` -> `OTelScopeVersionKey`
+    - `OtelLibraryNameKey` -> `OTelLibraryNameKey`
+    - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey`
+    - `OtelStatusCodeKey` -> `OTelStatusCodeKey`
+    - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey`
+    - `OtelStatusCodeOk` -> `OTelStatusCodeOk`
+    - `OtelStatusCodeError` -> `OTelStatusCodeError`
+  - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+    - `OtelScopeName` -> `OTelScopeName`
+    - `OtelScopeVersion` -> `OTelScopeVersion`
+    - `OtelLibraryName` -> `OTelLibraryName`
+    - `OtelLibraryVersion` -> `OTelLibraryVersion`
+    - `OtelStatusDescription` -> `OTelStatusDescription`
+- An `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state.
+  See the [README](./bridge/opentracing/README.md) for more information. (#3570)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739)
+- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763)
+  - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports.
+  - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout within which an export is attempted.
+
+### Changed
+
+- Fall back to `TextMapCarrier` when the carrier is not `HTTPHeaders` in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679)
+- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into.
+  This change is made to enable memory reuse by SDK users (see the sketch after this list). (#3732)
+- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776)
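+
+A sketch of the updated `Collect` signature, reusing one `ResourceMetrics` value to cut allocations (the reader is assumed to already be registered with a `MeterProvider`):
+
+```go
+package example
+
+import (
+    "context"
+
+    sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+    "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+func collectOnce(ctx context.Context, reader sdkmetric.Reader) error {
+    // The same ResourceMetrics value can be reused across collections.
+    var rm metricdata.ResourceMetrics
+    if err := reader.Collect(ctx, &rm); err != nil {
+        return err
+    }
+    // ... inspect rm.ScopeMetrics ...
+    return nil
+}
+```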
+
+### Fixed
+
+- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725)
+- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724)
+- Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733)
+- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743)
+- Data race issue in OTLP exporter retry mechanism. (#3755, #3756)
+- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772)
+- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/unit` package is deprecated.
+  Use the equivalent unit string instead. (#3776)
+  - Use `"1"` instead of `unit.Dimensionless`
+  - Use `"By"` instead of `unit.Bytes`
+  - Use `"ms"` instead of `unit.Milliseconds`
+
+## [1.13.0/0.36.0] 2023-02-07
+
+### Added
+
+- Attribute `KeyValue` creation functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions.
+  These functions ensure semantic convention type correctness. (#3675)
+
+### Fixed
+
+- The `http.target` attribute is no longer added by `ServerRequest` in the following packages. (#3687)
+  - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv`
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631)
+
+## [1.12.0/0.35.0] 2023-01-28
+
+### Added
+
+- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+  This option is used to configure `int64` Observer callbacks during their creation. (#3507)
+- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+  This option is used to configure `float64` Observer callbacks during their creation. (#3507)
+- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`.
+  These additions are used to enable external metric Producers. (#3524)
+- The `Callback` function type to `go.opentelemetry.io/otel/metric`.
+  This new named function type is registered with a `Meter`. (#3564)
+- The `go.opentelemetry.io/otel/semconv/v1.13.0` package.
+  The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499)
+  - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`.
+  - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`.
+- The `go.opentelemetry.io/otel/semconv/v1.14.0` package.
+  The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566)
+- The `go.opentelemetry.io/otel/semconv/v1.15.0` package.
+  The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578)
+- The `go.opentelemetry.io/otel/semconv/v1.16.0` package.
+  The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579)
+- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`.
+  These instruments are used as replacements of the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages. (#3575, #3586)
+  - `Float64ObservableCounter` replaces the `asyncfloat64.Counter`
+  - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter`
+  - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge`
+  - `Int64ObservableCounter` replaces the `asyncint64.Counter`
+  - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter`
+  - `Int64ObservableGauge` replaces the `asyncint64.Gauge`
+  - `Float64Counter` replaces the `syncfloat64.Counter`
+  - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter`
+  - `Float64Histogram` replaces the `syncfloat64.Histogram`
+  - `Int64Counter` replaces the `syncint64.Counter`
+  - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter`
+  - `Int64Histogram` replaces the `syncint64.Histogram`
+- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`.
+  This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116)
+- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+  This type is used to represent min/max values and still be able to distinguish unset and zero values (see the sketch after this list). (#3487)
+- The `go.opentelemetry.io/otel/semconv/v1.17.0` package.
+  The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599)
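+
+A sketch of reading the `Extrema` type mentioned above (`dp` is an illustrative histogram data point; the type was later made generic):
+
+```go
+package example
+
+import (
+    "fmt"
+
+    "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+func printMax(dp metricdata.HistogramDataPoint) {
+    // Value reports whether a maximum was actually recorded, so an unset
+    // extrema is distinguishable from a recorded zero.
+    if max, defined := dp.Max.Value(); defined {
+        fmt.Println("max:", max)
+    }
+}
+```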
+
+### Changed
+
+- The Jaeger and Zipkin exporters use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500)
+- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type. (#3507)
+  - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`.
+  - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`.
+  - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`.
+  - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`.
+- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package.
+  This `Registration` can be used to unregister callbacks. (#3522)
+- Global error handler uses an atomic value instead of a mutex. (#3543)
+- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541)
+- Global logger uses an atomic value instead of a mutex. (#3545)
+- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551)
+- The `Sampler` returned from `TraceIDRatioBased` in `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions.
+  This fixes random sampling when using ID generators like `xray.IDGenerator` and increases parity with other language implementations. (#3557)
+- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name.
+  Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516)
+- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514)
+- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562)
+  - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter`
+  - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter`
+  - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram`
+  - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter`
+  - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter`
+  - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge`
+- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed.
+  - The named `Callback` replaces the inline function parameter. (#3564)
+  - `Callback` is required to return an error. (#3576)
+  - `Callback` accepts the added `Observer` parameter.
+    This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly (see the sketch after this list). (#3584)
+  - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587)
+- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions.
+  This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint.
+  Instead, it uses the `net.sock.peer` attributes. (#3581)
+- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487)
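+
+A sketch of the revised `RegisterCallback` pattern, written against the shape this API later stabilized on in `go.opentelemetry.io/otel/metric` (instrument name and value are illustrative):
+
+```go
+package example
+
+import (
+    "context"
+
+    "go.opentelemetry.io/otel/metric"
+)
+
+func register(meter metric.Meter) (metric.Registration, error) {
+    gauge, err := meter.Int64ObservableGauge("queue.length")
+    if err != nil {
+        return nil, err
+    }
+    return meter.RegisterCallback(
+        // Values are observed through the Observer parameter instead of
+        // calling the instrument's Observe method directly.
+        func(_ context.Context, o metric.Observer) error {
+            o.ObserveInt64(gauge, 42)
+            return nil
+        },
+        gauge, // instruments are passed variadically
+    )
+}
+```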
+
+### Fixed
+
+- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549)
+- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter.
+  Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated.
+  Use `NewMetricProducer` instead. (#3541)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated.
+  Use `NewTracerProvider` instead. (#3116)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520)
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Int64ObservableCounter`
+  - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter`
+  - The `Gauge` method is replaced by `Meter.Int64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Float64ObservableCounter`
+  - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter`
+  - The `Gauge` method is replaced by `Meter.Float64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Int64Counter`
+  - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter`
+  - The `Histogram` method is replaced by `Meter.Int64Histogram`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Float64Counter`
+  - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter`
+  - The `Histogram` method is replaced by `Meter.Float64Histogram`
+
+## [1.11.2/0.34.0] 2022-12-05
+
+### Added
+
+- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package.
+   This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387)
+- Add Instrumentation Scope and Version as an info metric and label in the Prometheus exporter.
+  This can be disabled using the `WithoutScopeInfo()` option added to that package. (#3273, #3357)
+- OTLP exporters now recognize: (#3363)
+  - `OTEL_EXPORTER_OTLP_INSECURE`
+  - `OTEL_EXPORTER_OTLP_TRACES_INSECURE`
+  - `OTEL_EXPORTER_OTLP_METRICS_INSECURE`
+  - `OTEL_EXPORTER_OTLP_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE`
+- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`.
+  These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view` (see the sketch after this list). (#3459)
+- The `Instrument` and `InstrumentKind` type are added to `go.opentelemetry.io/otel/sdk/metric`.
+  These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
+- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459)
+- The `AssertHasAttributes` function allows instrument authors to test that returned data points have appropriate attributes. (#3487)
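+
+A sketch of the `View` additions above, renaming an instrument's output stream (names are illustrative):
+
+```go
+package example
+
+import sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+
+func newProviderWithView() *sdkmetric.MeterProvider {
+    // Rename every instrument called "latency" to "request.latency" in the
+    // streams produced for all registered readers.
+    view := sdkmetric.NewView(
+        sdkmetric.Instrument{Name: "latency"},     // match criteria
+        sdkmetric.Stream{Name: "request.latency"}, // resulting stream
+    )
+    return sdkmetric.NewMeterProvider(sdkmetric.WithView(view))
+}
+```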
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`.
+   Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option.
+   The views registered with the `MeterProvider` apply to all `Reader`s. (#3387)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260)
+- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260)
+- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260)
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369)
+- Remove comparable requirement for `Reader`s. (#3387)
+- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389)
+- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398)
+- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
+- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
+- Re-enabled Attribute Filters in the Metric SDK. (#3396)
+- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408)
+- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
+- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
+- Prevent duplicate Prometheus description, unit, and type. (#3469)
+- Prevent panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489)
+
+### Removed
+
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use the `otlpmetric[http|grpc].New` functions directly. (#3486)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated.
+  Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476)
+
+## [1.11.1/0.33.0] 2022-10-19
+
+### Added
+
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation.
+  By default, it will register with the default Prometheus registerer.
+  A non-default registerer can be used by passing the `WithRegisterer` option (see the sketch after this list). (#3239)
+- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285)
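+
+A sketch of registering the exporter with a non-default registerer, as described above (the registry wiring is an illustrative assumption):
+
+```go
+package example
+
+import (
+    promclient "github.com/prometheus/client_golang/prometheus"
+    otelprom "go.opentelemetry.io/otel/exporters/prometheus"
+    sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func newPrometheusPipeline() (*sdkmetric.MeterProvider, error) {
+    registry := promclient.NewRegistry() // a non-default registerer
+    exporter, err := otelprom.New(otelprom.WithRegisterer(registry))
+    if err != nil {
+        return nil, err
+    }
+    // The exporter acts as a Reader for the MeterProvider.
+    return sdkmetric.NewMeterProvider(sdkmetric.WithReader(exporter)), nil
+}
+```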
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error.
+   It will return an error if the exporter fails to register with Prometheus. (#3239)
+
+### Fixed
+
+- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963)
+- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it.
+   This fixes the implementation to be compliant with the W3C specification. (#3226)
+- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108, #3252)
+- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TraceProvider` no longer return an error when no processor is registered. (#3268)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281)
+- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293)
+- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278)
+- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup.
+  Instead, the exporter is defined as an "unchecked" collector for Prometheus.
+  This fixes the `reader is not registered` warning currently emitted on startup. (#3291, #3342)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names.
+   This can be disabled using the `WithoutUnits()` option added to that package. (#3352)
+
+## [1.11.0/0.32.3] 2022-10-12
+
+### Added
+
+- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261)
+
+### Changed
+
+- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214)
+- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`.
+  This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235)
+
+## [0.32.2] Metric SDK (Alpha) - 2022-10-11
+
+### Added
+
+- Added an example of using metric views to customize instruments. (#3177)
+- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261)
+
+### Changed
+
+- Flush pending measurements with the `PeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` when `ForceFlush` or `Shutdown` is called. (#3220)
+- Update histogram default bounds to match the requirements of the latest specification. (#3222)
+- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265)
+
+### Fixed
+
+- Use default view if instrument does not match any registered view of a reader. (#3224, #3237)
+- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251)
+- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251)
+- Log a warning when a conflicting instrument (e.g. description, unit, data-type) is created instead of returning an error. (#3251)
+- The OpenCensus bridge no longer sends empty batches of metrics. (#3263)
+
+## [0.32.1] Metric SDK (Alpha) - 2022-09-22
+
+### Changed
+
+- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting.
+   Invalid characters are replaced with `_`. (#3212)
+
+### Added
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192)
+- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206)
+
+### Fixed
+
+- Updated `go.mod` files to point to valid versions of the SDK. (#3216)
+- Set the `MeterProvider` resource on all exported metric data. (#3218)
+
+## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18
+
+### Changed
+
+- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification.
+  Please see the package documentation for how the new SDK is initialized and configured. (#3175)
+- Update the minimum supported Go version to go1.18. This removes support for go1.17. (#3179)
+
+### Removed
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed.
+  A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed.
+  A replacement package that supports the new metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider`in the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider`in the new metric SDK. (#3175)
+- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175)
+
+## [1.10.0] - 2022-09-09
+
+### Added
+
+- Support Go 1.19. (#3077)
+  Include compatibility testing and document support. (#3077)
+- Support the OTLP ExportTracePartialSuccess response; such responses are passed to the registered error handler. (#3106)
+- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.18.0` to `v0.19.0`. (#3107)
+
+### Changed
+
+- Fix misidentification of OpenTelemetry `SpanKind` in OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096)
+- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110)
+- All exporters will be shut down even if one reports an error. (#3091)
+- Ensure valid UTF-8 when truncating over-length attribute values. (#3156)
+
+## [1.9.0/0.0.3] - 2022-08-01
+
+### Added
+
+- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999)
+- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package.
+  The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009)
+- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package.
+  The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010)
+- Add the `http.method` attribute to HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018)
+
+### Fixed
+
+- Invalid warning for context setup being deferred in `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029)
+
+## [1.8.0/0.31.0] - 2022-07-08
+
+### Added
+
+- Add support for the `opentracing.TextMap` format in the `Inject` and `Extract` methods
+  of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911)
+
+### Changed
+
+- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886)
+- In the `go.opentelemetry.io/otel/sdk/instrumentation` package, rename `Library` to `Scope` and alias `Library` as `Scope`. (#2976)
+- Move the metric no-op implementation from the `nonrecording` package to the `metric` package. (#2866)
+
+### Removed
+
+- Support for go1.16. Only go1.17 and go1.18 are now supported. (#2917)
+
+### Deprecated
+
+- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated.
+  Use the equivalent `Scope` struct instead. (#2977)
+- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated.
+  Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977)
+
+## [1.7.0/0.30.0] - 2022-04-28
+
+### Added
+
+- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package.
+  The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763)
+- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package.
+  The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792)
+- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package.
+  The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842)
+- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776)
+
+### Fixed
+
+- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784)
+- Remove import of `testing` package in non-tests builds of the `go.opentelemetry.io/otel` package. (#2786)
+
+### Changed
+
+- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790)
+- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`.
+  The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790)
+- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`.
+  Consequently, the `Record` type from the same package has also had the embedded method renamed. (#2790)
+
+### Deprecated
+
+- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `Iterator.Attribute` method instead. (#2790)
+- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790)
+- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `MergeIterator.Attribute` method instead. (#2790)
+
+### Removed
+
+- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
+- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
+
+## [0.29.0] - 2022-04-11
+
+### Added
+
+- The metrics global package was added back into several test files. (#2764)
+- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package.
+  This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750)
+
+### Removed
+
+- Removed the `go.opentelemetry.io/otel/sdk/export/metric` module.
+  Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720)
+
+### Changed
+
+- Don't panic anymore when setting a global MeterProvider to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`.
+  This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748)
+
+## [1.6.3] - 2022-04-07
+
+### Fixed
+
+- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773)
+
+## [1.6.2] - 2022-04-06
+
+### Changed
+
+- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`.
+  This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748)
+
+## [1.6.1] - 2022-03-28
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant.
+  Instead of using `"https://opentelemetry.io/schemas/v<version>"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/<version>"`. (#2743, #2744)
+
+### Security
+
+- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`.
+  This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728)
+
+## [1.6.0/0.28.0] - 2022-03-23
+
+### ⚠️ Notice ⚠️
+
+This update is a breaking change of the unstable Metrics API.
+Code instrumented with the `go.opentelemetry.io/otel/metric` package will need to be modified.
+
+### Added
+
+- Add metrics exponential histogram support.
+  New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502)
+- Add Go 1.18 to our compatibility tests. (#2679)
+- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517)
+- Add the `metric/global` package for obtaining and setting the global `MeterProvider`. (#2660)
+
+### Changed
+
+- The metrics API has been significantly changed to match the revised OpenTelemetry specification.
+  High-level changes include:
+
+  - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s.
+    These `InstrumentProvider`s are managed with a `Meter`.
+  - Synchronous and asynchronous instruments are grouped into their own packages based on value types.
+  - Asynchronous callbacks can now be registered with a `Meter`.
+
+  Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660)
+
+### Fixed
+
+- Fallback to general attribute limits when span specific ones are not set in the environment. (#2675, #2677)
+
+## [1.5.0] - 2022-03-16
+
+### Added
+
+- Log the exporters' configuration in the `TracerProvider` debug message. (#2578)
+- Added support to configure the span limits with environment variables.
+  The following environment variables are supported. (#2606, #2637)
+  - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT`
+  - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT`
+  - `OTEL_SPAN_EVENT_COUNT_LIMIT`
+  - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT`
+  - `OTEL_SPAN_LINK_COUNT_LIMIT`
+  - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT`
+
+  If the provided environment variables are invalid (negative), the default values will be used.
+- Rename the `gc` runtime name to `go`. (#2560)
+- Add resource container ID detection. (#2418)
+- Add span attribute value length limit.
+  The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`.
+  The default limit for this resource is "unlimited". (#2637)
+- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`.
+  This option replaces the `WithSpanLimits` option.
+  Zero or negative values will not be changed to the default value like `WithSpanLimits` does.
+  Setting a limit to zero will effectively disable the related resource it limits, and setting it to a negative value will make that resource unlimited.
+  Consequently, limits should be constructed using `NewSpanLimits` and updated accordingly. (#2637)
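+
+A sketch of `WithRawSpanLimits` as described above (the specific limit values are illustrative):
+
+```go
+package example
+
+import sdktrace "go.opentelemetry.io/otel/sdk/trace"
+
+func newTracerProviderWithLimits() *sdktrace.TracerProvider {
+    // Start from the defaults, then adjust; zero and negative values are
+    // respected rather than replaced, unlike with WithSpanLimits.
+    limits := sdktrace.NewSpanLimits()
+    limits.AttributeValueLengthLimit = 1024 // truncate long attribute values
+    limits.EventCountLimit = -1             // negative means unlimited
+    return sdktrace.NewTracerProvider(sdktrace.WithRawSpanLimits(limits))
+}
+```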
+
+### Changed
+
+- Drop oldest tracestate `Member` when capacity is reached. (#2592)
+- Add event and link drop counts to the exported data from the `otlptrace` exporter. (#2601)
+- Unify path cleaning functionality in the `otlpmetric` and `otlptrace` configuration. (#2639)
+- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640)
+- Introduce new internal `envconfig` package for OTLP exporters. (#2608)
+- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661)
+
+### Fixed
+
+- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616)
+- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` client. (#2614, #2625)
+- Unlimited span limits are now supported (negative values). (#2636, #2637)
+
+### Deprecated
+
+- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`.
+  Use `WithRawSpanLimits` instead.
+  That option allows setting unlimited and zero limits, this option does not.
+  This option will be kept until the next major version incremented release. (#2637)
+
+## [1.4.1] - 2022-02-16
+
+### Fixed
+
+- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615)
+
+## [1.4.0] - 2022-02-11
+
+### Added
+
+- Use the `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify the Zipkin collector endpoint. (#2490)
+- Log the configuration of `TracerProvider`s and `Tracer`s for debugging.
+  To enable, use a logger with verbosity (V level) `>=1`. (#2500)
+- Added support to configure the batch span-processor with environment variables.
+  The following environment variables are used. (#2515)
+  - `OTEL_BSP_SCHEDULE_DELAY`
+  - `OTEL_BSP_EXPORT_TIMEOUT`
+  - `OTEL_BSP_MAX_QUEUE_SIZE`
+  - `OTEL_BSP_MAX_EXPORT_BATCH_SIZE`
+
+### Changed
+
+- Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589)
+
+### Deprecated
+
+- Deprecate the `go.opentelemetry.io/otel/sdk/export/metric` module.
+  Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382)
+- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445)
+
+### Fixed
+
+- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461)
+- Fix UDP packets overflowing with Jaeger payloads. (#2489, #2512)
+- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491)
+- Specify explicit buckets in Prometheus example, fixing issue where example only has `+inf` bucket. (#2419, #2493)
+- W3C baggage will now decode URL-escaped values. (#2529)
+- Baggage members are now only validated once, when calling `NewMember`, and not again when adding them to the baggage itself. (#2522)
+- The order attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to be in compliance with the OpenTelemetry specification.
+  Instead of dropping the least-recently-used attribute, the last added attribute is dropped.
+  This drop order still only applies to attributes with unique keys not already contained in the span.
+  If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576)
+
+### Removed
+
+- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546)
+  - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge)
+  - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram)
+  - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum)
+
+## [1.3.0] - 2021-12-10
+
+### ⚠️ Notice ⚠️
+
+We have updated the project's minimum supported Go version to 1.16.
+
+### Added
+
+- Added an internal Logger.
+  This can be used by the SDK and API to provide users with feedback about the internal state.
+  To enable verbose logs, configure the logger to print V(1) logs. For debugging information, configure it to print V(5) logs. (#2343)
+- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425)
+- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296)
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporter/otel/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2329)
+- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2425)
+- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425)
+- The `go.opentelemetry.io/otel/exporter/otel/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432)
+- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. (#2371)
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporter/otel/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification.
+  Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP_<signal>_ENDPOINT` environment variable is now used without modification of the path.
+  When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433)
+- The basic metric controller is updated to use a `sync.Map` to avoid blocking calls. (#2381)
+- The `go.opentelemetry.io/otel/exporters/jaeger` exporter correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440)
+
+### Deprecated
+
+- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`, use the new `WithRetry` `Option` instead. (#2425)
+- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithBackoff` `Option`, use the new `WithRetry` `Option` instead. (#2425)
+
+### Removed
+
+- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350)
+- Remove the metric Bound Instruments interface and implementations. (#2399)
+- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423)
+- Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348)
+
+## [1.2.0] - 2021-11-12
+
+### Changed
+
+- Metric SDK `export.ExportKind`, `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively to keep in line with current specification and protocol along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274)
+- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274)
+- Metrics API cleanup. The `metric/sdkapi` package has been created to relocate the API-to-SDK interface:
+  - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner`
+  - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`.
+  - The No-op implementations of sync and async instruments are no longer exported; new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271)
+- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335)
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
+- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267)
+- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334)
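+
+A brief sketch of the new carrier; pairing it with the W3C `TraceContext` propagator is just for illustration:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel/propagation"
+)
+
+func main() {
+	// MapCarrier stores propagated values in an in-memory map, which is
+	// handy for tests and for transports that are not HTTP based.
+	carrier := propagation.MapCarrier{}
+	tc := propagation.TraceContext{}
+
+	tc.Inject(context.Background(), carrier) // writes traceparent when a span is active
+	fmt.Println(carrier.Keys())
+}
+```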
+
+## [1.1.0] - 2021-10-27
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
+- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package.
+  The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320)
+- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package.
+  The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321)
+- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package.
+  The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. (#2322)
+  - When upgrading from the `semconv/v1.4.0` package note the following name changes:
+    - `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey`
+    - `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey`
+    - `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey`
+    - `k8SStatefulsetNameKey` -> `K8SStatefulSetNameKey`
+    - `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey`
+    - `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey`
+
+### Changed
+
+- Links added to a span will be dropped by the SDK if they contain an invalid span context. (#2275)
+
+### Fixed
+
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284)
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285)
+- The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289)
+
+## [1.0.1] - 2021-10-01
+
+### Fixed
+
+- json stdout exporter no longer crashes due to concurrency bug. (#2265)
+
+## [Metrics 0.24.0] - 2021-10-01
+
+### Changed
+
+- `NoopMeterProvider` is now private and `NewNoopMeterProvider` must be used to obtain a no-op `MeterProvider`. (#2237)
+- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197)
+  - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`.
+  - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`.
+
+## [1.0.0] - 2021-09-20
+
+This is the first stable release for the project.
+This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the project's [versioning policy](./VERSIONING.md).
+
+### Added
+
+- OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer has `WithSchemaURL` option. (#2242)
+
+### Fixed
+
+- Slice-valued attributes can correctly be used as map keys. (#2223)
+
+### Removed
+
+- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248)
+- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234)
+- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233)
+- Removed deprecated functions, types, and methods from `go.opentelemetry.io/otel/attribute` package.
+  Use the typed functions and methods added to the package instead. (#2235)
+  - The `Key.Array` method is removed.
+  - The `Array` function is removed.
+  - The `Any` function is removed.
+  - The `ArrayValue` function is removed.
+  - The `AsArray` function is removed.
+
+## [1.0.0-RC3] - 2021-09-02
+
+### Added
+
+- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149)
+- Added `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when panic is handled in `span.End`. (#2163)
+- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions. (#2162)
+  - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package (see the sketch after this list).
+- Added the `go.opentelemetry.io/otel/example/fib` example package.
+  Included is an example application that computes Fibonacci numbers. (#2203)
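+
+A small sketch combining the typed slice attributes and the `WithStackTrace` option from the items above; the tracer name, attribute key, and error are illustrative:
+
+```go
+package main
+
+import (
+	"context"
+	"errors"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+func main() {
+	_, span := otel.Tracer("example").Start(context.Background(), "op")
+	defer span.End()
+
+	// Typed slice attributes replace the former Array function.
+	span.SetAttributes(attribute.StringSlice("rpc.args", []string{"a", "b"}))
+
+	// WithStackTrace(true) attaches a stack trace to the error event.
+	span.RecordError(errors.New("boom"), trace.WithStackTrace(true))
+}
+```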
+
+### Changed
+
+- Metric instruments have been renamed to match the (feature-frozen) metric API specification:
+  - `ValueRecorder` becomes `Histogram`
+  - `ValueObserver` becomes `Gauge`
+  - `SumObserver` becomes `CounterObserver`
+  - `UpDownSumObserver` becomes `UpDownCounterObserver`
+  The API exported from this project is still considered experimental. (#2202)
+- Metric SDK/API implementation type `InstrumentKind` moves into `sdkapi` sub-package. (#2091)
+- The Metrics SDK export record no longer contains a Resource pointer; the SDK `"go.opentelemetry.io/otel/sdk/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, while pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120)
+- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is now harmonized such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON objects directly after one another is consistent with JSON loggers, and a bit easier to parse and read. (#2196)
+- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` functions in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated.
+  All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package.
+  The functions from that package should be used instead. (#2166)
+- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type is deprecated.
+  Use the typed `*Slice` functions and types added to the package instead. (#2162)
+- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated.
+  Use the typed functions instead. (#2181)
+- The `go.opentelemetry.io/otel/oteltest` package is deprecated.
+  The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188)
+
+### Removed
+
+- Removed metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105)
+
+### Fixed
+
+- The `fromEnv` detector no longer throws an error when `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138)
+- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140)
+- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169)
+- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120)
+- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195)
+- Fixed typos in resources.go. (#2201)
+
+## [1.0.0-RC2] - 2021-07-26
+
+### Added
+
+- Added `WithOSDescription` resource configuration option to set OS (Operating System) description resource attribute (`os.description`). (#1840)
+- Added `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840)
+- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
+  This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095)
+- Added the `LinkFromContext` API, which returns a `Link` encapsulating the `SpanContext` from the provided context along with any provided attributes. (#2115)
+- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`.
+  This new type replaces the API `Link` type of the same name found in the `otel/trace` package for most usages within the SDK.
+  For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118)
+- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+  This type can be used with the default SDK as a `SpanProcessor` during testing, as sketched below. (#2132)
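+
+A compact testing sketch, assuming the default SDK; the package and test names are illustrative:
+
+```go
+package example_test
+
+import (
+	"context"
+	"testing"
+
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+	"go.opentelemetry.io/otel/sdk/trace/tracetest"
+)
+
+func TestSpanIsRecorded(t *testing.T) {
+	// Register the recorder as a SpanProcessor on a real TracerProvider.
+	sr := tracetest.NewSpanRecorder()
+	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr))
+
+	_, span := tp.Tracer("test").Start(context.Background(), "op")
+	span.End()
+
+	// Ended returns the spans that have completed.
+	if got := len(sr.Ended()); got != 1 {
+		t.Fatalf("expected 1 ended span, got %d", got)
+	}
+}
+```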
+
+### Changed
+
+- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027)
+- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095)
+
+### Deprecated
+
+- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114)
+- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness`, are deprecated and will be removed in the next release. (#2123)
+- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated.
+  Use the `trace.ParseTraceState` function instead. (#2122)
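+
+A minimal sketch of the replacement, assuming a well-formed `tracestate` header value:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+func main() {
+	// ParseTraceState decodes a W3C tracestate header string.
+	ts, err := trace.ParseTraceState("vendor1=value1,vendor2=value2")
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(ts.Len(), ts.Get("vendor1")) // 2 value1
+}
+```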
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020)
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020)
+- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function.
+  The explicit `With*` options for every built-in detector should be used instead. (#2026 #2097)
+- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
+  The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095)
+- Removed `DroppedAttributeCount` field from `otel/trace.Link` struct. (#2118)
+
+### Fixed
+
+- When using `WithNewRoot`, the parent context is no longer used for making sampling decisions. (#2032)
+- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073)
+- OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092)
+- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package.
+  This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099 #2102)
+- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108)
+- Use `6831` as default Jaeger agent port instead of `6832`. (#2131)
+
+## [Experimental Metrics v0.22.0] - 2021-07-19
+
+### Added
+
+- Adds HTTP support for OTLP metrics exporter. (#2022)
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020)
+
+## [1.0.0-RC1] / 0.21.0 - 2021-06-18
+
+With this release we are introducing a split in module versions.  The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1`
+while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`.  Modules at major version 1 or greater will not depend on modules
+with major version 0.
+
+### Added
+
+- Adds the `otlpgrpc.WithRetry` option for configuring the retry policy for transient errors on the otlp/gRPC exporter. (#1832)
+  - The following status codes are defined as transient errors:
+      | gRPC Status Code | Description |
+      | ---------------- | ----------- |
+      | 1  | Cancelled |
+      | 4  | Deadline Exceeded |
+      | 8  | Resource Exhausted |
+      | 10 | Aborted |
+      | 11 | Out of Range |
+      | 14 | Unavailable |
+      | 15 | Data Loss |
+- Added `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874)
+- Added `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+  This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873)
+- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886)
+- Adds `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889)
+- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912)
+- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package.
+  It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937)
+- Added `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package.
+  This method returns the number of list-members the `TraceState` holds. (#1937)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses an `otlptrace.Client` to send data.
+  Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1922)
+- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions. (#1967)
+- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package.
+  These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and work directly with the new `Baggage` type (see the sketch after this list). (#1967)
+- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1963)
+- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938)
+- Several builtin resource detectors now correctly populate the schema URL. (#1938)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses an `otlpmetric.Client` to send data.
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`. (#1991)
+- Added `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005)
+- Added `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005)
+- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline.  (#2009)
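+
+A short sketch of the new baggage API from the items above; the member key and value are illustrative, and error handling is elided for brevity:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel/baggage"
+)
+
+func main() {
+	// Build a member, collect it into a Baggage, and carry it in a context.
+	m, _ := baggage.NewMember("user_id", "42")
+	b, _ := baggage.New(m)
+
+	ctx := baggage.ContextWithBaggage(context.Background(), b)
+	fmt.Println(baggage.FromContext(ctx).Member("user_id").Value()) // 42
+}
+```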
+
+### Changed
+
+- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item.
+  `NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798)
+- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using `WithBuiltinDetectors` Option. (#1810)
+- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846)
+- CI builds validate against last two versions of Go, dropping 1.14 and adding 1.16. (#1865)
+- BatchSpanProcessor now reports export failures when the `ForceFlush()` method is called. (#1860)
+- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855)
+- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871)
+- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method.
+  This method returns the status of a span using the new `Status` type. (#1874)
+- Updated `ExportSpans` method of the`SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`.
+  This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873)
+- Unembed `SpanContext` in `Link`. (#1877)
+- Generate Semantic conventions from the specification YAML. (#1891)
+- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901)
+- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. (#1902)
+- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903)
+- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `span.End()` to only accept Options that are allowed at `End()`. (#1921)
+- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Refactored option types according to the contribution style guide. (#1882)
+- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package.
+  This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use.
+  The new `ParseTraceState` function should be used to create a `TraceState`. (#1931)
+- Updated `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931)
+- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931)
+- Updated `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Updated `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. (#1931)
+- Updated `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987)
+- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/metric/prometheus` package is deprecated, use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/jaeger` package is deprecated, use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/zipkin` package is deprecated, use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993)
+
+### Removed
+
+- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810)
+- Unexported the types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`; use the corresponding `With*()` options to add them individually. (#1810)
+- Removed the `Tracer` and `IsRecording` method from the `ReadOnlySpan` in the `go.opentelemetry.io/otel/sdk/trace`.
+  The `Tracer` method is not required to be included in this interface and, given the mutable nature of the tracer that is associated with a span, this method is not appropriate.
+  The `IsRecording` method returns whether the span is recording or not.
+  A read-only span value does not need to know if updates to it will be recorded or not.
+  By definition, it cannot be updated so there is no point in communicating if an update is recorded. (#1873)
+- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package.
+  The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type.
+  When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873)
+- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package.
+  Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of its own.
+  The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library specific `Tracer` instead. (#1900)
+  - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009)
+- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919)
+- Removed `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931)
+- Removed `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package.
+  Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967)
+- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed.
+  These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985)
+- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed.  Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990)
+- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed.  Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005)
+
+### Fixed
+
+- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851)
+- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856)
+- BatchSpanProcessor now drops span batches that failed to be exported. (#1860)
+- Use `http://localhost:14268/api/traces` as default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898)
+- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931)
+- Add logic to determine if the channel is closed to fix Jaeger exporter test panic with close closed channel. (#1870, #1973)
+- Avoid transport security when OTLP endpoint is a Unix socket. (#2001)
+
+## [0.20.0] - 2021-04-23
+
+### Added
+
+- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install the exporter in tracing and metrics pipelines. (#1373)
+- Adds semantic conventions for exceptions. (#1492)
+- Added Jaeger environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST` and `OTEL_EXPORTER_JAEGER_AGENT_PORT`.
+  These environment variables can be used to override the Jaeger agent hostname and port. (#1752)
+- Option `ExportTimeout` was added to batch span processor. (#1755)
+- `trace.TraceFlags` is now a defined type over `byte` and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it. (#1770)
+- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771)
+- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771)
+- Adds test to check BatchSpanProcessor ignores `OnEnd` and `ForceFlush` post `Shutdown`. (#1772)
+- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785)
+- Added `WithOSType` resource configuration option to set OS (Operating System) type resource attribute (`os.type`). (#1788)
+- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788)
+  - `process.pid`
+  - `process.executable.name`
+  - `process.executable.path`
+  - `process.command_args`
+  - `process.owner`
+  - `process.runtime.name`
+  - `process.runtime.version`
+  - `process.runtime.description`
+- Adds `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789)
+- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables. (#1758, #1769 and #1811)
+  - `OTEL_EXPORTER_OTLP_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_HEADERS`
+  - `OTEL_EXPORTER_OTLP_TRACES_HEADERS`
+  - `OTEL_EXPORTER_OTLP_METRICS_HEADERS`
+  - `OTEL_EXPORTER_OTLP_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE`
+- Adds the `otlpgrpc.WithTimeout` option for configuring the timeout of the otlp/gRPC exporter. (#1821)
+- Adds the `jaeger.WithMaxPacketSize` option for configuring the maximum UDP packet size used when connecting to the Jaeger agent. (#1853)
+
+### Fixed
+
+- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when not being sampled. (#1750)
+- The Jaeger exporter now correctly sets tags for the Span status code and message.
+  This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761)
+- The Jaeger exporter now correctly records Span event's names using the `"event"` key for a tag.
+  Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768)
+- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688)
+- Fixed typo for default service name in Jaeger Exporter. (#1797)
+- Fix flaky OTLP client connection reconnection. (#1527, #1814)
+- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit.
+  Instead, the exporter now splits the batch into smaller sendable batches. (#1828)
+
+### Changed
+
+- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492)
+- Jaeger exporter was updated to use thrift v0.14.1. (#1712)
+- Migrate from using internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713)
+- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713)
+- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span.
+  The Span's SpanContext can now self-identify as being remote or not.
+  This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731)
+- Improve OTLP/gRPC exporter connection errors. (#1737)
+- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field.
+  The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748)
+- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span.
+  This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749)
+- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD`
+  to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with OTel specification. (#1752)
+- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757)
+- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself.
+  It no longer is a conglomerate of itself, events, and link attributes that have been dropped. (#1771)
+- Make `ExportSpans` in Jaeger Exporter honor context deadline. (#1773)
+- Modify the Zipkin exporter's default service name to use the default resource's service name instead of an empty string. (#1777)
+- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778)
+- The `prometheus.InstallNewPipeline` example is moved from a comment to an example test. (#1796)
+- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. (#1800)
+- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create.
+  This enables the caller to shutdown and flush using the related `TracerProvider` methods. (#1822)
+- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824)
+- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument.
+  The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824)
+- The Jaeger exporter no longer batches exported spans itself, instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. (#1830)
+
+### Removed
+
+- Removed Jaeger environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, and `JAEGER_TAGS`.
+  These environment variables will no longer be used to override values of the Jaeger exporter. (#1752)
+- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root.
+  This is unspecified behavior that the OpenTelemetry community plans to standardize in the future.
+  To prevent backwards incompatible changes when it is specified, these links are removed. (#1726)
+- Setting the error status while recording an error with a Span from the `oteltest` package. (#1729)
+- The concept of a remote and local Span stored in a context is unified to just the current Span.
+  Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed.
+  Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContex` can be used to return the current Span.
+  If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731)
+- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed.
+  This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749)
+- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770)
+- Remove `Process` configuration, `WithProcessFromEnv` and `ProcessFromEnv`, and type from the Jaeger exporter package.
+  The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804)
+- Remove the `WithDisabled` option from the Jaeger exporter.
+  To disable the exporter unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806)
+- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter.
+  These functions for retrieving specific environment variable values are redundant with other internal functions and
+  are not intended for end user use. (#1824)
+- Removed the Jaeger exporter `WithSDKOptions` `Option`.
+  This option was used to set SDK options for the exporter creation convenience functions.
+  These functions are provided as a way to easily setup or install the exporter with what are deemed reasonable SDK settings for common use cases.
+  If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825)
+- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed.
+  The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter `Option` type is removed.
+  The type is no longer used by the exporter to configure anything.
+  All the previous configurations these options provided were duplicates of SDK configuration.
+  They have been removed in favor of the SDK configuration, focusing the exporter configuration on only the endpoints it will send telemetry to. (#1830)
+
+## [0.19.0] - 2021-03-18
+
+### Added
+
+- Added `Marshaler` config option to `otlphttp` to enable OTLP over JSON or protobufs. (#1586)
+- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608)
+- Added `WithSampler` and `WithSpanLimits` to tracer provider. (#1633, #1702)
+- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701)
+- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703)
+
+### Changed
+
+- `trace.SpanContext` is now immutable and has no exported fields. (#1573)
+  - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known (see the sketch after this list).
+- Update the `ForceFlush` method signature of the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608)
+- Update the `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to return an error on shutdown failure. (#1608)
+- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612)
+- `"go.opentelemetry.io/sdk/metric/controller.basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across project. (#1656)
+- Added non-empty string check for trace `Attribute` keys. (#1659)
+- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662)
+- Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673)
+- Jaeger exporter populates Jaeger's Span Process from Resource. (#1673)
+- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692)
+- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693)
+- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693)
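+
+A compact sketch of constructing an immutable `SpanContext`; the hex IDs are illustrative:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+func main() {
+	// With no exported fields, a SpanContext with known values is built
+	// from a SpanContextConfig instead of a struct literal.
+	traceID, _ := trace.TraceIDFromHex("0102030405060708090a0b0c0d0e0f10")
+	spanID, _ := trace.SpanIDFromHex("0102030405060708")
+
+	sc := trace.NewSpanContext(trace.SpanContextConfig{
+		TraceID: traceID,
+		SpanID:  spanID,
+		Remote:  true,
+	})
+	fmt.Println(sc.IsValid(), sc.IsRemote()) // true true
+}
+```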
+
+### Removed
+
+- Removed the `serviceName` parameter from the Zipkin exporter; the resource is used instead. (#1549)
+- Removed `WithConfig` from tracer provider to avoid overriding configuration. (#1633)
+- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs.
+   These are now returned as a SpanProcessor interface from their respective constructors. (#1638)
+- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660)
+- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663)
+- Removed `jaeger.WithProcess` configuration option. (#1673)
+- Removed `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693)
+
+### Fixed
+
+- Jaeger Exporter: Ensure mapping between OTEL and Jaeger span data complies with the specification. (#1626)
+- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655)
+- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678)
+- Do not set span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681)
+- Synchronization issues in global trace delegate implementation. (#1686)
+- Reduced excess memory usage by global `TracerProvider`. (#1687)
+
+## [0.18.0] - 2021-03-03
+
+### Added
+
+- Added `resource.Default()` for use with meter and tracer providers. (#1507)
+- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535)
+- Added `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface. (#1544)
+- Added `code` attributes to `go.opentelemetry.io/otel/semconv` package. (#1558)
+- Compatibility testing suite in the CI system for the following systems. (#1567)
+   | OS      | Go Version | Architecture |
+   | ------- | ---------- | ------------ |
+   | Ubuntu  | 1.15       | amd64        |
+   | Ubuntu  | 1.14       | amd64        |
+   | Ubuntu  | 1.15       | 386          |
+   | Ubuntu  | 1.14       | 386          |
+   | MacOS   | 1.15       | amd64        |
+   | MacOS   | 1.14       | amd64        |
+   | Windows | 1.15       | amd64        |
+   | Windows | 1.14       | amd64        |
+   | Windows | 1.15       | 386          |
+   | Windows | 1.14       | 386          |
+
+### Changed
+
+- Replaced interface `oteltest.SpanRecorder` with its existing implementation
+  `StandardSpanRecorder`. (#1542)
+- Default span limit values to 128. (#1535)
+- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535)
+- Renamed the `otel/label` package to `otel/attribute`. (#1541)
+- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551)
+- Parallelize the CI linting and testing. (#1567)
+- Stagger timestamps in exact aggregator tests. (#1569)
+- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621)
+- Prevent end-users from implementing some interfaces (#1575)
+
+  ```
+      "otel/exporters/otlp/otlphttp".Option
+      "otel/exporters/stdout".Option
+      "otel/oteltest".Option
+      "otel/trace".TracerOption
+      "otel/trace".SpanOption
+      "otel/trace".EventOption
+      "otel/trace".LifeCycleOption
+      "otel/trace".InstrumentationOption
+      "otel/sdk/resource".Option
+      "otel/sdk/trace".ParentBasedSamplerOption
+      "otel/sdk/trace".ReadOnlySpan
+      "otel/sdk/trace".ReadWriteSpan
+  ```
+
+### Removed
+
+- Removed attempt to resample spans upon changing the span name with `span.SetName()`. (#1545)
+- The `test-benchmark` is no longer a dependency of the `precommit` make target. (#1567)
+- Removed the `test-386` make target.
+   This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567)
+
+### Fixed
+
+- The sequential timing check of timestamps in the stdout exporter is now set up explicitly to be sequential (#1571). (#1572)
+- Windows build of Jaeger tests now compiles with OS-specific functions (#1576). (#1577)
+- The sequential timing check of timestamps of `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` is now set up explicitly to be sequential (#1578). (#1579)
+- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). (#1581)
+- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570)
+
+## [0.17.0] - 2021-02-12
+
+### Changed
+
+- Rename project default branch from `master` to `main`. (#1505)
+- Reverse order in which `Resource` attributes are merged, per change in spec. (#1501)
+- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528)
+- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric (#1528)
+- Move metric-related public global APIs from otel to otel/metric/global. (#1528)
+
+### Fixed
+
+- Fixed otlpgrpc reconnection issue.
+- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and uses the new `WithAddress` instead of `WithEndpoint`. (#1513)
+- The otel-collector example now uses the default OTLP receiver port of the collector.
+
+## [0.16.0] - 2021-01-13
+
+### Added
+
+- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360)
+- `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369)
+- Added documentation about the project's versioning policy. (#1388)
+- Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. (#1418)
+- Added codeql workflow to GitHub Actions (#1428)
+- Added Gosec workflow to GitHub Actions (#1429)
+- Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports the binary protobuf payloads. (#1420)
+- Add an OpenCensus exporter bridge. (#1444)
+
+### Changed
+
+- Rename `internal/testing` to `internal/internaltest`. (#1449)
+- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360)
+- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360)
+- Improve span duration accuracy. (#1360)
+- Migrated CI/CD from CircleCI to GitHub Actions (#1382)
+- Remove duplicate checkout from GitHub Actions workflow (#1407)
+- Metric `array` aggregator renamed to `exact` to match its `aggregation.Kind`. (#1412)
+- Metric `exact` aggregator includes per-point timestamps (#1412)
+- Metric stdout exporter uses MinMaxSumCount aggregator for ValueRecorder instruments (#1412)
+- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369)
+- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369)
+- Unify the endpoint API related to the OTel exporter. (#1401)
+- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435)
+- Metric aggregator `Count()` and histogram `Bucket.Counts` are consistently `uint64`. (#1430)
+- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434)
+- `SamplingResult` is now passed a `Tracestate` from the parent `SpanContext`. (#1432)
+- Moved gRPC driver for OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420)
+- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`. (#1447)
+- Metric Push and Pull Controller components are combined into a single "basic" Controller:
+  - `WithExporter()` and `Start()` to configure Push behavior
+  - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior
+  - `Start()` and `Stop()` accept Context. (#1378)
+- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452)
+
+### Removed
+
+- Remove `errUninitializedSpan` as its only usage is now obsolete. (#1360)
+- Remove Metric export functionality related to quantiles and summary data points: this is not specified (#1412)
+- Remove DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released (#1412)
+
+### Fixed
+
+- `BatchSpanProcessor.Shutdown()` will now shutdown underlying `export.SpanExporter`. (#1443)
+
+## [0.15.0] - 2020-12-10
+
+### Added
+
+- The `WithIDGenerator` `TracerProviderOption` is added to the `go.opentelemetry.io/otel/trace` package to configure an `IDGenerator` for the `TracerProvider`. (#1363)
+
+### Changed
+
+- The Zipkin exporter now uses the Span status code to determine the status of the exported span. (#1328)
+- `NewExporter` and `Start` functions in `go.opentelemetry.io/otel/exporters/otlp` now receive `context.Context` as a first parameter. (#1357)
+- Move the OpenCensus example into `example` directory. (#1359)
+- Moved the SDK's `internal.IDGenerator` interface in to the `sdk/trace` package to enable support for externally-defined ID generators. (#1363)
+- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4 (#1374)
+- Bump `github.com/golangci/golangci-lint` in `/internal/tools` (#1375)
+
+### Fixed
+
+- Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. (#1381)
+
+## [0.14.0] - 2020-11-19
+
+### Added
+
+- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254)
+- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259)
+- `SpanContextFromContext` returns `SpanContext` from context. (#1255)
+- `TraceState` has been added to `SpanContext`. (#1340)
+- `DeploymentEnvironmentKey` added to `go.opentelemetry.io/otel/semconv` package. (#1323)
+- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305)
+- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333)
+- Add missing tests for `sdk/trace/attributes_map.go`. (#1337)
+
+### Changed
+
+- Move the `go.opentelemetry.io/otel/api/trace` package into `go.opentelemetry.io/otel/trace` with the following changes. (#1229) (#1307)
+  - `ID` has been renamed to `TraceID`.
+  - `IDFromHex` has been renamed to `TraceIDFromHex`.
+  - `EmptySpanContext` is removed.
+- Move the `go.opentelemetry.io/otel/api/trace/tracetest` package into `go.opentelemetry.io/otel/oteltest`. (#1229)
+- OTLP Exporter updates:
+  - supports OTLP v0.6.0 (#1230, #1354)
+  - supports configurable aggregation temporality (default: Cumulative, optional: Stateless). (#1296)
+- The Sampler is now called on local child spans. (#1233)
+- The `Kind` type from the `go.opentelemetry.io/otel/api/metric` package was renamed to `InstrumentKind` to more specifically describe what it is and avoid semantic ambiguity. (#1240)
+- The `MetricKind` method of the `Descriptor` type in the `go.opentelemetry.io/otel/api/metric` package was renamed to `Descriptor.InstrumentKind`.
+   This matches the returned type and fixes misuse of the term metric. (#1240)
+- Move test harness from the `go.opentelemetry.io/otel/api/apitest` package into `go.opentelemetry.io/otel/oteltest`. (#1241)
+- Move the `go.opentelemetry.io/otel/api/metric/metrictest` package into `go.opentelemetry.io/otel/oteltest` as part of #964. (#1252)
+- Move the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric` as part of #1303. (#1321)
+- Move the `go.opentelemetry.io/otel/api/metric/registry` package into `go.opentelemetry.io/otel/metric/registry` as a part of #1303. (#1316)
+- Move the `Number` type (together with related functions) from `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric/number` as a part of #1303. (#1316)
+- The function signature of the Span `AddEvent` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required name and a variable number of `EventOption`s (see the sketch after this list). (#1254)
+- The function signature of the Span `RecordError` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required error value and a variable number of `EventOption`s. (#1254)
+- Move the `go.opentelemetry.io/otel/api/global` package to `go.opentelemetry.io/otel`. (#1262) (#1330)
+- Move the `Version` function from `go.opentelemetry.io/otel/sdk` to `go.opentelemetry.io/otel`. (#1330)
+- Rename correlation context header from `"otcorrelations"` to `"baggage"` to match the OpenTelemetry specification. (#1267)
+- Fix `Code.UnmarshalJSON` to work with valid JSON only. (#1276)
+- The `resource.New()` method changes signature to support builtin attributes and functional options, including `telemetry.sdk.*` and
+  `host.name` semantic conventions; the former method is renamed `resource.NewWithAttributes`. (#1235)
+- The Prometheus exporter now exports non-monotonic counters (i.e. `UpDownCounter`s) as gauges. (#1210)
+- Correct the `Span.End` method documentation in the `otel` API to state updates are not allowed on a span after it has ended. (#1310)
+- Updated span collection limits for attribute, event and link counts to 1000. (#1318)
+- Renamed `semconv.HTTPUrlKey` to `semconv.HTTPURLKey`. (#1338)
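+
+A hedged sketch of the updated `AddEvent` and `RecordError` signatures, written against the current `go.opentelemetry.io/otel/trace` package layout (at the time of this release the API lived directly in `go.opentelemetry.io/otel`); the event name and error are illustrative:
+
+```go
+package main
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/trace"
+)
+
+func main() {
+	_, span := otel.Tracer("example").Start(context.Background(), "op")
+	defer span.End()
+
+	// No context argument: a required name plus variadic EventOptions.
+	span.AddEvent("cache miss", trace.WithTimestamp(time.Now()))
+	span.RecordError(errors.New("lookup failed"))
+}
+```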
+
+### Removed
+
+- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, and `ErrNilSpanID` errors from the `go.opentelemetry.io/otel` package are unexported now. (#1243)
+- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy.
+   It is replaced by using the `AddEvent` method with a `WithTimestamp` option. (#1254)
+- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`.
+   `Tracer` and `Span` from the same module should be used in their place instead. (#1306)
+- `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350)
+- Remove the following labels types: INT32, UINT32, UINT64 and FLOAT32. (#1314)
+
+### Fixed
+
+- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244)
+- The `go.opentelemetry.io/otel/api/global` packages global TextMapPropagator now delegates functionality to a globally set delegate for all previously returned propagators. (#1258)
+- Fix condition in `label.Any`. (#1299)
+- Fix global `TracerProvider` to pass options to its configured provider. (#1329)
+- Fix missing handler for `ExactKind` aggregator in OTLP metrics transformer (#1309)
+
+## [0.13.0] - 2020-10-08
+
+### Added
+
+- OTLP Metric exporter supports Histogram aggregation. (#1209)
+- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214)
+- A Baggage API to implement the OpenTelemetry specification. (#1217)
+- Add a `Shutdown` method to the SDK trace provider that shuts down its processors in the order they were registered. (#1227)
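+
+A sketch of the new `Code` behavior against today's `go.opentelemetry.io/otel/codes` package, which retains it; the snippet marshals through a pointer so it works whether the marshaling methods use value or pointer receivers:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"go.opentelemetry.io/otel/codes"
+)
+
+func main() {
+	// Code marshals to and from its string form and implements fmt.Stringer.
+	c := codes.Error
+	b, _ := json.Marshal(&c)
+	fmt.Println(string(b)) // "Error"
+
+	var parsed codes.Code
+	if err := json.Unmarshal([]byte(`"Ok"`), &parsed); err == nil {
+		fmt.Println(parsed.String()) // Ok
+	}
+}
+```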
+
+### Changed
+
+- Set default propagator to no-op propagator. (#1184)
+- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel/propagation` package. (#1212) (#1325) See the sketch after this list.
+- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212)
+- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification.
+   They now are `Unset`, `Error`, and `Ok`.
+   They no longer track the gRPC codes. (#1214)
+- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this package instead of the gRPC project. (#1214)
+- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/baggage`. (#1217) (#1325)
+- A `Shutdown` method of `SpanProcessor` and all its implementations receives a context and returns an error. (#1264)
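+
+A sketch of the unified carrier/propagator pair using today's API; note the composite constructor now lives in the `propagation` package rather than `go.opentelemetry.io/otel` as described in this entry:
+
+```go
+package example
+
+import (
+	"context"
+	"net/http"
+
+	"go.opentelemetry.io/otel/propagation"
+)
+
+func inject(ctx context.Context, req *http.Request) {
+	// One TextMapPropagator covers both injection and extraction, and any
+	// TextMapCarrier works with it; HeaderCarrier adapts http.Header.
+	prop := propagation.NewCompositeTextMapPropagator(
+		propagation.TraceContext{},
+		propagation.Baggage{},
+	)
+	prop.Inject(ctx, propagation.HeaderCarrier(req.Header))
+}
+```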
+
+### Fixed
+
+- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226)
+
+### Removed
+
+- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212)
+- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification.
+   The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212)
+- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216)
+- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217)
+- Remove duplicate hostname key `HostHostNameKey` in Resource semantic conventions. (#1219)
+- Nested array/slice support has been removed. (#1226)
+
+## [0.12.0] - 2020-09-24
+
+### Added
+
+- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. (#1108)
+- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s.
+   This addition was made to conform with our project option conventions. (#1155)
+- Instrumentation library information was added to the Zipkin exporter. (#1119)
+- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166) See the sketch after this list.
+- More semantic conventions for k8s as resource attributes. (#1167)
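+
+A sketch of how the flush hook is typically used; in today's SDK `ForceFlush` takes a context and is also exposed on the `TracerProvider` itself:
+
+```go
+package example
+
+import (
+	"context"
+
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+// flushAndStop drains queued spans before shutting the provider down,
+// e.g. just before a process exits.
+func flushAndStop(ctx context.Context, tp *sdktrace.TracerProvider) error {
+	if err := tp.ForceFlush(ctx); err != nil {
+		return err
+	}
+	return tp.Shutdown(ctx)
+}
+```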
+
+### Changed
+
+- Add a reconnecting UDP connection type to the Jaeger exporter.
+   This change adds a new optional implementation of the UDP conn interface used to detect changes to an agent's host DNS record.
+   It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063)
+- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`.
+   This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108)
+- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`.
+   This is more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108)
+- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109)
+- Move the `B3` and `TraceContext` from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package.
+    This move reflects the OpenTelemetry specification for these propagators and cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118)
+- Rename Jaeger tags used for instrumentation library information to reflect changes in OpenTelemetry specification. (#1119)
+- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115)
+- Move `tools` package under `internal`. (#1141)
+- Move the `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`.
+   The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged. (#1142)
+- Rename `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on parent span. (#1153) See the sampler sketch after this list.
+- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155)
+- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161)
+- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to
+   recommend the use of `newConfig()` instead of `configure()`. (#1163)
+- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163)
+- Ensure exported interface types include parameter names and update the
+   Style Guide to reflect this styling rule. (#1172)
+- Don't consider unset environment variable for resource detection to be an error. (#1170)
+- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and
+  `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`.
+- ValueObserver instruments use LastValue aggregator by default. (#1165)
+- OTLP Metric exporter supports LastValue aggregation. (#1165)
+- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185)
+- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
+- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
+- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. (#1190)
+- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
+- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
+- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
+- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
+- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
+- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
+- Rename `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190)
+- Rename `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190)
+- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Renamed `SamplingDecision` values to comply with OpenTelemetry specification change. (#1192)
+- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201)
+- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195)
+- Add test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203)
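+
+The `TraceIDRatioBased` and `ParentBased` entries above combine naturally; a sketch against today's `go.opentelemetry.io/otel/sdk/trace` package, which also reflects the `Provider` to `TracerProvider` renames in this list (the 0.25 ratio is arbitrary):
+
+```go
+package example
+
+import (
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+func newTracerProvider() *sdktrace.TracerProvider {
+	// TraceIDRatioBased ignores the parent's decision on its own, so it is
+	// wrapped in ParentBased to honor sampled parents and only apply the
+	// ratio to root spans.
+	sampler := sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.25))
+	return sdktrace.NewTracerProvider(sdktrace.WithSampler(sampler))
+}
+```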
+
+### Removed
+
+- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the
+   `go.opentelemetry.io/contrib/propagators/` module. (#1191)
+- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey` from package `go.opentelemetry.io/otel/semconv`. (#1194)
+
+### Fixed
+
+- Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171)
+- Fix missing shutdown processor in otel-collector example. (#1186)
+- Fix missing shutdown processor in basic and namedtracer examples. (#1197)
+
+## [0.11.0] - 2020-08-24
+
+### Added
+
+- Support for exporting array-valued attributes via OTLP. (#992)
+- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994)
+- Support for filtering metric label sets. (#1047)
+- A dimensionality-reducing metric Processor. (#1057)
+- Integration tests for more OTel Collector Attribute types. (#1062)
+- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078)
+
+### Changed
+
+- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049)
+- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049)
+- Rename `api/testharness` to `api/apitest`. (#1049)
+- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049)
+- Change Metric Processor to merge multiple observations. (#1024)
+- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module.
+   This removes the package dependencies of this bridge from the rest of the OpenTelemetry based project. (#1038)
+- Renamed `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016)
+- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES`. (#1042)
+- Replace `WithSyncer` with `WithBatcher` in examples. (#1044)
+- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046)
+- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` into the new `go.opentelemetry.io/otel/label` package. (#1060)
+- Unify Callback Function Naming.
+   Rename `*Callback` to `*Func`. (#1061)
+- CI builds validate against last two versions of Go, dropping 1.13 and adding 1.15. (#1064)
+- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification compliant `Exporter` interface.
+   This interface still supports the export of `SpanData`, but only as a slice.
+   Implementations are now also required to return any error from `ExportSpans` if one occurs, as well as implement a `Shutdown` method for exporter clean-up. (#1078) See the sketch after this list.
+- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error.
+   If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078)
+- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error, instead only a `*Provider`.
+   This change is related to `NewBatchSpanProcessor` not returning an error which was the only error this function would return. (#1078)
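+
+A sketch of the resulting exporter contract using its present-day descendant, `sdk/trace.SpanExporter` (which exports `ReadOnlySpan`s rather than `SpanData`); the drop-everything exporter is purely illustrative:
+
+```go
+package example
+
+import (
+	"context"
+
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+// dropExporter sketches the contract: spans arrive as a slice, errors are
+// returned from ExportSpans, and Shutdown handles exporter clean-up.
+type dropExporter struct{}
+
+func (dropExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
+	return nil // a real exporter would serialize and send the spans
+}
+
+func (dropExporter) Shutdown(ctx context.Context) error { return nil }
+
+func newProcessor() sdktrace.SpanProcessor {
+	// NewBatchSpanProcessor no longer returns an error alongside the processor.
+	return sdktrace.NewBatchSpanProcessor(dropExporter{})
+}
+```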
+
+### Removed
+
+- Duplicate, unused API sampler interface. (#999)
+   Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead.
+- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository.
+   This move includes moving the `grpc` example to the `go.opentelemetry.io/contrib` as well. (#1027)
+- The `WithSpan` method of the `Tracer` interface.
+   The functionality this method provided was limited compared to what a user can provide themselves.
+   It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043)
+- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions.
+   These were holdovers from an approach prior to the `TracerProvider` design and were no longer used. (#1077)
+- The `oterror` package. (#1026)
+- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032)
+
+### Fixed
+
+- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031)
+- Correct instrumentation version tag in Jaeger exporter. (#1037)
+- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043)
+- Move internally generated protobuf code from the `go.opentelemetry.io/otel` to the OTLP exporter to reduce dependency overhead. (#1050)
+- The `otel-collector` example referenced outdated collector processors. (#1006)
+
+## [0.10.0] - 2020-07-29
+
+This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages.
+
+### Added
+
+- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern.
+    These functions build a new exporter with default SDK options and register the exporter with the `global` package, respectively. (#944)
+- Add propagator option for gRPC instrumentation. (#986)
+- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987)
+
+### Changed
+
+- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function.
+   This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944)
+- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`.
+   This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963)
+- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962)
+- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968) See the sketch after this list.
+  - `value.Bool` was replaced with `kv.BoolValue`.
+  - `value.Int64` was replaced with `kv.Int64Value`.
+  - `value.Uint64` was replaced with `kv.Uint64Value`.
+  - `value.Float64` was replaced with `kv.Float64Value`.
+  - `value.Int32` was replaced with `kv.Int32Value`.
+  - `value.Uint32` was replaced with `kv.Uint32Value`.
+  - `value.Float32` was replaced with `kv.Float32Value`.
+  - `value.String` was replaced with `kv.StringValue`.
+  - `value.Int` was replaced with `kv.IntValue`.
+  - `value.Uint` was replaced with `kv.UintValue`.
+  - `value.Array` was replaced with `kv.ArrayValue`.
+- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972)
+- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter` are also implemented by the wrapped `ResponseWriter`. (#979)
+- Rename `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980)
+- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985)
+- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989)
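+
+The `kv` package was later folded into `label` and then into today's `attribute` package, but the typed-constructor shape described above survives; a sketch using the current `attribute` names:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+func main() {
+	// Typed value constructors, the descendants of kv.BoolValue and friends.
+	v := attribute.Int64Value(42)
+	fmt.Println(v.Type(), v.AsInt64()) // INT64 42
+
+	// Key-value constructors pair a key with a typed value in one call.
+	kv := attribute.String("service.name", "demo")
+	fmt.Println(kv.Key, kv.Value.AsString())
+}
+```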
+
+### Removed
+
+- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel` which it was synonymous with. (#970)
+
+### Fixed
+
+- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953)
+- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1. (#957)
+- Use `global.Handle` for span export errors in the OTLP exporter. (#946)
+- Correct Go language formatting in the README documentation. (#961)
+- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977)
+- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983)
+- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984)
+
+## [0.9.0] - 2020-07-20
+
+### Added
+
+- A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939)
+- A Detector to automatically detect resources from an environment variable. (#939) See the sketch after this list.
+- Github action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938)
+- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`.
+   References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942)
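+
+A sketch using today's `go.opentelemetry.io/otel/sdk/resource` package, where the environment detector is enabled through an option (the variable it reads was later renamed, as noted in the 0.11.0 section above):
+
+```go
+package example
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/sdk/resource"
+)
+
+func detect(ctx context.Context) (*resource.Resource, error) {
+	// WithFromEnv enables the Detector that reads resource attributes from
+	// OTEL_RESOURCE_ATTRIBUTES (OTEL_RESOURCE_LABELS at the time of this entry).
+	return resource.New(ctx, resource.WithFromEnv())
+}
+```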
+
+### Changed
+
+- Non-nil value `struct`s for key-value pairs will be marshaled using JSON rather than `Sprintf`. (#948)
+
+### Removed
+
+- Removed dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943)
+
+## [0.8.0] - 2020-07-09
+
+### Added
+
+- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject.
+   Values for the supported HTTP encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882)
+- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882)
+- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882)
+- Add `peer.service` semantic attribute. (#898)
+- Add database-specific semantic attributes. (#899)
+- Add semantic convention for `faas.coldstart` and `container.id`. (#909)
+- Add http content size semantic conventions. (#905)
+- Include `http.request_content_length` in HTTP request basic attributes. (#905)
+- Add semantic conventions for operating system process resource attribute keys. (#919)
+- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931)
+
+### Changed
+
+- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879)
+- Use lowercase header names for B3 Multiple Headers. (#881)
+- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`.
+   This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings.
+   If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882) See the sketch after this list.
+- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header.
+   Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid.
+   This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882)
+- Extend semantic conventions for RPC. (#900)
+- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. (#920)
+  - `"api/standard".FaaSName` -> `FaaSNameKey`
+  - `"api/standard".FaaSID` -> `FaaSIDKey`
+  - `"api/standard".FaaSVersion` -> `FaaSVersionKey`
+  - `"api/standard".FaaSInstance` -> `FaaSInstanceKey`
+
+### Removed
+
+- The `FlagsUnused` trace flag is removed.
+   The purpose of this flag was to act as the inverse of `FlagsSampled`; the inverse of `FlagsSampled` is now used directly instead. (#882)
+- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed.
+   If B3 header keys are needed [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882)
+
+### Fixed
+
+- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881)
+- The B3 propagator now correctly supports sampling only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882)
+- The B3 propagator now propagates the debug flag.
+   This removes the behavior of changing the debug flag into a set sampling bit.
+   Instead, this now follows the B3 specification and omits the `X-B3-Sampled` header. (#882)
+- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampled` header when injecting. (#882)
+- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. (#883)
+- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885)
+- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896)
+- Ensure span status is not set to `Unknown` when no HTTP status code is provided as it is assumed to be `200 OK`. (#908)
+- Ensure `httptrace.clientTracer` closes `http.headers` span. (#912)
+- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903)
+- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905)
+- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913)
+- Update otel-collector example to use the v0.5.0 collector. (#915)
+- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922)
+- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
+- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists.
+   This is in accordance with OpenTelemetry semantic conventions. (#922)
+- Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923)
+- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925)
+- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926)
+- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930)
+
+## [0.7.0] - 2020-06-26
+
+This release implements the v0.5.0 version of the OpenTelemetry specification.
+
+### Added
+
+- The othttp instrumentation now includes default metrics. (#861)
+- This CHANGELOG file to track all changes in the project going forward.
+- Support for array type attributes. (#798)
+- Apply transitive dependabot go.mod dependency updates as part of a new automatic Github workflow. (#844)
+- Timestamps are now passed to exporters for each export. (#835)
+- Add new `Accumulation` type to metric SDK to transport telemetry from `Accumulator`s to `Processor`s.
+   This replaces the prior `Record` `struct` use for this purpose. (#835)
+- New dependabot integration to automate package upgrades. (#814)
+- `Meter` and `Tracer` implementations accept an instrumentation version as an optional argument.
+   This instrumentation version is passed on to exporters. (#811) (#805) (#802)
+- The OTLP exporter includes the instrumentation version in telemetry it exports. (#811)
+- Environment variables for Jaeger exporter are supported. (#796)
+- New `aggregation.Kind` in the export metric API. (#808)
+- New example that uses OTLP and the collector. (#790)
+- Handle errors in the span `SetName` during span initialization. (#791)
+- Default service config to enable retries for retry-able failed requests in the OTLP exporter and an option to override this default. (#777)
+- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778)
+- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to a user-defined `Handler`.
+   There is also functionality for the user to register their own `Handler` as well as a convenience function `Handle` to handle an error with this global `Handler`. (#778)
+- Options to specify propagators for httptrace and grpctrace instrumentation. (#784)
+- The required `application/json` header for the Zipkin exporter is included in all exports. (#774)
+- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. (#769)
+
+### Changed
+
+- Rename `Integrator` to `Processor` in the metric SDK. (#863)
+- Rename `AggregationSelector` to `AggregatorSelector`. (#859)
+- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858)
+- Rename `simple` integrator to `basic` integrator. (#857)
+- Merge otlp collector examples. (#841)
+- Change the metric SDK to support cumulative, delta, and pass-through exporters directly.
+   With these changes, cumulative and delta specific exporters are able to request the correct kind of aggregation from the SDK. (#840)
+- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812)
+- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other.
+   All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`.
+   Some of the `Aggregators` used unnecessary locking and that has been cleaned up. (#812)
+- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812)
+- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810)
+- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. (#808)
+- Send configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806)
+- Replace one-off error handlers with calls to the `global.Handle` function. (#791)
+- Rename `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779)
+- Make the argument order to Histogram and DDSketch `New()` consistent. (#781)
+
+### Removed
+
+- `Uint64NumberKind` and related functions from the API. (#864)
+- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803)
+- `SpanID` is no longer included in parameters for sampling decision to match the OpenTelemetry specification. (#775)
+
+### Fixed
+
+- Upgrade OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. (#866)
+- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824)
+- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867)
+- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853)
+- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854)
+- Bump github.com/golang/protobuf from 1.3.2 to 1.4.2. (#848)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp. (#817)
+- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools. (#828)
+- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus. (#838)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger. (#829)
+- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3. (#815)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin. (#823)
+- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools. (#830)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus. (#822)
+- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin. (#820)
+- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger. (#831)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0. (#836)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger. (#837)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp. (#839)
+- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger. (#843)
+- Set span status from HTTP status code in the othttp instrumentation. (#832)
+- Fixed typo in push controller comment. (#834)
+- The `Aggregator` testing has been updated and cleaned. (#812)
+- `metric.Number(0)` expressions are replaced by `0` where possible. (#812)
+- Fixed `global` `handler_test.go` test failure. (#804)
+- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766)
+- Fixed OTLP example's accidental early close of exporter. (#807)
+- Ensure zipkin exporter reads and closes response body. (#788)
+- Update instrumentation to use `api/standard` keys instead of custom keys. (#782)
+- Clean up tools and RELEASING documentation. (#762)
+
+## [0.6.0] - 2020-05-21
+
+### Added
+
+- Support for `Resource`s in the prometheus exporter. (#757)
+- New pull controller. (#751)
+- New `UpDownSumObserver` instrument. (#750)
+- OpenTelemetry collector demo. (#711)
+- New `SumObserver` instrument. (#747)
+- New `UpDownCounter` instrument. (#745)
+- New timeout `Option` and configuration function `WithTimeout` to the push controller. (#742)
+- New `api/standard` package to implement semantic conventions and standard key-value generation. (#731)
+
+### Changed
+
+- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761)
+- Use `[]float64` for histogram boundaries, not `[]metric.Number`. (#758)
+- Change OTLP example to use exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756)
+- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754)
+- The prometheus exporter now uses the new pull controller. (#751)
+- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`. (#752)
+- Support use of synchronous instruments in asynchronous callbacks. (#725)
+- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739)
+- Rename `Observer` instrument to `ValueObserver`. (#734)
+- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738)
+- Replace `Measure` instrument by `ValueRecorder` instrument. (#732)
+- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727)
+
+### Fixed
+
+- Ensure gRPC `ClientStream` override methods do not panic in grpctrace package. (#755)
+- Disable parts of `BatchSpanProcessor` test until a fix is found. (#743)
+- Fix `string` case in `kv` `Infer` function. (#746)
+- Fix panic in grpctrace client interceptors. (#740)
+- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737)
+- Rewrite span batch process queue batching logic. (#719)
+- Remove the push controller named Meter map. (#738)
+- Fix Histogram aggregator initial state (fix #735). (#736)
+- Ensure golang alpine image is running `golang-1.14` for examples. (#733)
+- Added test for grpctrace `UnaryInterceptorClient`. (#695)
+- Rearrange `api/metric` code layout. (#724)
+
+## [0.5.0] - 2020-05-13
+
+### Added
+
+- Batch `Observer` callback support. (#717)
+- Alias `api` types to root package of project. (#696)
+- Create basic `othttp.Transport` for simple client instrumentation. (#678)
+- `SetAttribute(string, interface{})` to the trace API. (#674)
+- Jaeger exporter option that allows user to specify custom http client. (#671)
+- `Stringer` and `Infer` methods to `key`s. (#662)
+
+### Changed
+
+- Rename `NewKey` in the `kv` package to just `Key`. (#721)
+- Move `core` and `key` to `kv` package. (#720)
+- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709)
+- Rename SDK `Batcher` to `Integrator` to match draft OpenTelemetry SDK specification. (#710)
+- Rename SDK `Ungrouped` integrator to `simple.Integrator` to match draft OpenTelemetry SDK specification. (#710)
+- Rename SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710)
+- Move `Number` from `core` to `api/metric` package. (#706)
+- Move `SpanContext` from `core` to `trace` package. (#692)
+- Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. (#681)
+
+### Fixed
+
+- Update tooling to run generators in all submodules. (#705)
+- gRPC interceptor regexp to match methods without a service name. (#683)
+- Use a `const` for padding 64-bit B3 trace IDs. (#701)
+- Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700)
+- Left-pad 64-bit B3 trace IDs with zero. (#698)
+- Propagate at least the first W3C tracestate header. (#694)
+- Remove internal `StateLocker` implementation. (#688)
+- Increase instance size CI system uses. (#690)
+- Add a `key` benchmark and use reflection in `key.Infer()`. (#679)
+- Fix internal `global` test by using `global.Meter` with `RecordBatch()`. (#680)
+- Reimplement histogram using mutex instead of `StateLocker`. (#669)
+- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667)
+- Update documentation to not include any references to `WithKeys`. (#672)
+- Correct misspelling. (#668)
+- Fix clobbering of the span context if extraction fails. (#656)
+- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670)
+
+## [0.4.3] - 2020-04-24
+
+### Added
+
+- `Dockerfile` and `docker-compose.yml` to run example code. (#635)
+- New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621)
+- New `api/label` package, providing common label set implementation. (#651)
+- Support for JSON marshaling of `Resources`. (#654)
+- `TraceID` and `SpanID` implementations for `Stringer` interface. (#642)
+- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627)
+- `WithSpanFormatter` option to the othttp plugin. (#617)
+- Updated README to include section for compatible libraries and include reference to the contrib repository. (#612)
+- The prometheus exporter now supports exporting histograms. (#601)
+- A `String` method to the `Resource` to return a hashable identifier for the now-unique resource. (#613)
+- An `Iter` method to the `Resource` to return an array `AttributeIterator`. (#613)
+- An `Equal` method to the `Resource` to test the equivalence of resources. (#613)
+- An iterable structure (`AttributeIterator`) for `Resource` attributes.
+
+### Changed
+
+- The zipkin exporter's `NewExporter` now requires a `serviceName` argument to ensure this needed value is provided. (#644)
+- Pass `Resources` through the metrics export pipeline. (#659)
+
+### Removed
+
+- `WithKeys` option from the metric API. (#639)
+
+### Fixed
+
+- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658)
+- Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653)
+- Use type names for return values in jaeger exporter. (#648)
+- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650)
+- `Checkpoint` only after `Update`; Keep records in the `sync.Map` longer. (#647)
+- Do not cache `reflect.ValueOf()` in metric Labels. (#649)
+- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626)
+- Add error wrapping to the prometheus exporter. (#631)
+- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623)
+- Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614)
+- Update `Resource` internal representation to uniquely and reliably identify resources. (#613)
+- Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622)
+- Ensure spans created by httptrace client tracer reflect operation structure. (#618)
+- Create a new recorder rather than reusing one when multiple observations occur in the same epoch for asynchronous instruments. (#610)
+- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611)
+
+## [0.4.2] - 2020-03-31
+
+### Fixed
+
+- Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607)
+- Fix time conversion from internal to OTLP in OTLP exporter. (#606)
+
+## [0.4.1] - 2020-03-31
+
+### Fixed
+
+- Update `tag.sh` to create signed tags. (#604)
+
+## [0.4.0] - 2020-03-30
+
+### Added
+
+- New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580)
+- Script to verify examples after a new release. (#579)
+
+### Removed
+
+- The dogstatsd exporter due to lack of support.
+   This additionally removes support for statsd. (#591)
+- `LabelSet` from the metric API.
+   This is replaced by a `[]core.KeyValue` slice. (#595)
+- `Labels` from the metric API's `Meter` interface. (#595)
+
+### Changed
+
+- The metric `export.Labels` became an interface which the SDK implements and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574)
+- Renamed `internal/metric.Meter` to `MeterImpl`. (#580)
+- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580)
+
+### Fixed
+
+- Corrected missing return in mock span. (#582)
+- Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596)
+- Update to v0.3.0 of the OTLP in the OTLP exporter. (#588)
+- Update pre-release script to be compatible between GNU and BSD based systems. (#592)
+- Add a `RecordBatch` benchmark. (#594)
+- Moved span transforms of the OTLP exporter to the internal package. (#593)
+- Build both go-1.13 and go-1.14 in circleci to test for all supported versions of Go. (#569)
+- Removed unneeded allocation on empty labels in the OTLP exporter. (#597)
+- Update `BatchedSpanProcessor` to process the queue until no data but respect max batch size. (#599)
+- Update project documentation godoc.org links to pkg.go.dev. (#602)
+
+## [0.3.0] - 2020-03-21
+
+This is a first official beta release, which provides almost fully complete metrics, tracing, and context propagation functionality.
+There is still a possibility of breaking changes.
+
+### Added
+
+- Add `Observer` metric instrument. (#474)
+- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494)
+- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459)
+- The zipkin trace exporter. (#495)
+- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545)
+- Add `StatusMessage` field to the trace `Span`. (#524)
+- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525)
+- The `Resource` type was added to the SDK. (#528)
+- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538)
+- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction.
+   Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560)
+- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560)
+- Scripts to better automate the release process. (#576)
+
+### Changed
+
+- Default to using `AlwaysSampler` instead of `ProbabilitySampler` to match the OpenTelemetry specification. (#506)
+- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511)
+- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511)
+- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524)
+- Updated the trace `Sampler` interface to conform to the OpenTelemetry specification. (#531)
+- Rename metric API `Options` to `Config`. (#541)
+- Rename metric `Counter` aggregator to be `Sum`. (#541)
+- Unify metric options into `Option` from instrument specific options. (#541)
+- The trace API's `TraceProvider` now supports `Resource`s. (#545)
+- Correct error in zipkin module name. (#548)
+- The jaeger trace exporter now supports `Resource`s. (#551)
+- Metric SDK now supports `Resource`s.
+   The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552)
+- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557)
+- The stdout trace exporter now supports `Resource`s. (#558)
+- The metric `Descriptor` is now included at the API instead of the SDK. (#560)
+- Replace `Ordered` with an iterator in `export.Labels`. (#567)
+
+### Removed
+
+- The vendor-specific Stackdriver exporter. It is now hosted on third-party vendor infrastructure. (#452)
+- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560)
+- `GetDescriptor` from the metric SDK. (#575)
+- The `Gauge` instrument from the metric API. (#537)
+
+### Fixed
+
+- Make histogram aggregator checkpoint consistent. (#438)
+- Update README with import instructions and how to build and test. (#505)
+- The default label encoding was updated to be unique. (#508)
+- Use `NewRoot` in the othttp plugin for public endpoints. (#513)
+- Fix data race in `BatchedSpanProcessor`. (#518)
+- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). (#521)
+- Use a variable-size array to represent ordered labels in maps. (#523)
+- Update the OTLP protobuf and update changed import path. (#532)
+- Use `StateLocker` implementation in `MinMaxSumCount`. (#546)
+- Eliminate goroutine leak in histogram stress test. (#547)
+- Update OTLP exporter with latest protobuf. (#550)
+- Add filters to the othttp plugin. (#556)
+- Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565)
+- Encode labels once during checkpoint.
+   The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter.
+   This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572)
+- Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573)
+
+## [0.2.3] - 2020-03-04
+
+### Added
+
+- `RecordError` method on `Span`s in the trace API to simplify adding error events to spans. (#473)
+- Configurable push frequency for exporters setup pipeline. (#504)
+
+### Changed
+
+- Rename the `exporter` directory to `exporters`.
+   The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`.
+   This resulted in all subsequent releases not becoming the default latest.
+   A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages.
+   Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags.
+   Consequentially, this action also renames *all* exporter packages. (#502)
+
+### Removed
+
+- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. (#503)
+
+## [0.2.2] - 2020-02-27
+
+### Added
+
+- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467)
+- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467)
+- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier`. (#467)
+- `Config` and configuring `Option` to the propagator API. (#467)
+- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467)
+- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier`. (#467)
+- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467)
+- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467)
+- Histogram aggregator. (#433)
+- `DefaultPropagator` function and have it return `trace.TraceContext` as the default context propagator. (#456)
+- `AlwaysParentSample` sampler to the trace API. (#455)
+- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451)
+
+### Changed
+
+- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481)
+- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481)
+- Move correlation context propagation to correlation package. (#479)
+- Do not default to putting remote span context into links. (#480)
+- `Tracer.WithSpan` updated to accept `StartOptions`. (#472)
+- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432)
+- Renamed the `export` package to `metric` to match directory structure. (#432)
+- Rename the `api/distributedcontext` package to `api/correlation`. (#444)
+- Rename the `api/propagators` package to `api/propagation`. (#444)
+- Move the propagators from the `propagators` package into the `trace` API package. (#444)
+- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462)
+- Moved all dependencies of tools package to a tools directory. (#466)
+
+### Removed
+
+- Binary propagators. (#467)
+- NOOP propagator. (#467)
+
+### Fixed
+
+- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492)
+- Fix a possible nil-dereference crash. (#478)
+- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483)
+- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484)
+- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482)
+- Initialize `onError` based on `Config` in prometheus exporter. (#486)
+- Correct module name in prometheus exporter README. (#475)
+- Removed tracer name prefix from span names. (#430)
+- Fix `aggregator_test.go` import package comment. (#431)
+- Improved detail in stdout exporter. (#436)
+- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442)
+- Reorders the Makefile targets within `precommit` target so we generate files and build the code before doing linting, so we can get much nicer errors about syntax errors from the compiler. (#442)
+- Reword function documentation in gRPC plugin. (#446)
+- Send the `span.kind` tag to Jaeger from the jaeger exporter. (#441)
+- Fix `metadataSupplier` in the jaeger exporter to overwrite the header if existing instead of appending to it. (#441)
+- Upgraded to Go 1.13 in CI. (#465)
+- Correct opentelemetry.io URL in trace SDK documentation. (#464)
+- Refactored reference counting logic in SDK determination of stale records. (#468)
+- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469)
+
+## [0.2.1.1] - 2020-01-13
+
+### Fixed
+
+- Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428)
+
+## [0.2.1] - 2020-01-08
+
+### Added
+
+- Global meter forwarding implementation.
+   This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392)
+- Global trace forwarding implementation.
+   This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406)
+- Standardize export pipeline creation in all exporters. (#395)
+- Testing, organization, and comments for 64-bit field alignment. (#418)
+- Script to tag all modules in the project. (#414)
+
+### Changed
+
+- Renamed `propagation` package to `propagators`. (#362)
+- Renamed `B3Propagator` propagator to `B3`. (#362)
+- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362)
+- Renamed `BinaryPropagator` propagator to `Binary`. (#362)
+- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362)
+- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362)
+- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362)
+- Renamed `SpanOption` to `StartOption` in the trace API. (#369)
+- Renamed `StartOptions` to `StartConfig` in the trace API. (#369)
+- Renamed `EndOptions` to `EndConfig` in the trace API. (#369)
+- `Number` now has a pointer receiver for its methods. (#375)
+- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379)
+- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379)
+- Renamed `Message` in Event to `Name` in the trace API. (#389)
+- Prometheus exporter no longer aggregates metrics, instead it only exports them. (#385)
+- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400)
+- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400)
+- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400)
+- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API. (#400)
+- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400)
+- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400)
+- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. (#400)
+- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400)
+- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400)
+- Renamed the `File` option in the stdout exporter to `Writer`. (#404)
+- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case.
+
+### Fixed
+
+- Aggregator import path corrected. (#421)
+- Correct links in README. (#368)
+- The README was updated to match latest code changes in its examples. (#374)
+- Don't capitalize error statements. (#375)
+- Fix ignored errors. (#375)
+- Fix ambiguous variable naming. (#375)
+- Removed unnecessary type casting. (#375)
+- Use named parameters. (#375)
+- Updated release schedule. (#378)
+- Correct http-stackdriver example module name. (#394)
+- Removed the `http.request` span in `httptrace` package. (#397)
+- Add comments in the metrics SDK (#399)
+- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into an empty one. (#402) (#403)
+- Add documentation of compatible exporters in the README. (#405)
+- Typo fix. (#408)
+- Simplify span check logic in SDK tracer implementation. (#419)
+
+## [0.2.0] - 2019-12-03
+
+### Added
+
+- Unary gRPC tracing example. (#351)
+- Prometheus exporter. (#334)
+- Dogstatsd metrics exporter. (#326)
+
+### Changed
+
+- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352)
+- Rename `GetMeter` to `Meter`. (#357)
+- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355)
+- Rename `HTTPB3Propagator` to `B3Propagator`. (#355)
+- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355)
+- Move `/global` package to `/api/global`. (#356)
+- Rename `GetTracer` to `Tracer`. (#347)
+
+### Removed
+
+- `SetAttribute` from the `Span` interface in the trace API. (#361)
+- `AddLink` from the `Span` interface in the trace API. (#349)
+- `Link` from the `Span` interface in the trace API. (#349)
+
+### Fixed
+
+- Exclude example directories from coverage report. (#365)
+- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360)
+- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project's specified minimum version and this environment variable is not needed for that version of Go. (#359)
+- Run the race checker for all tests. (#354)
+- Redundant commands in the Makefile are removed. (#354)
+- Split the `generate` and `lint` targets of the Makefile. (#354)
+- Renames `circle-ci` target to more generic `ci` in Makefile. (#354)
+- Add example Prometheus binary to gitignore. (#358)
+- Support negative numbers with the `MaxSumCount`. (#335)
+- Resolve race conditions in `push_test.go` identified in #339. (#340)
+- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336)
+- Trace benchmark now tests both `AlwaysSample` and `NeverSample`.
+   Previously it was testing `AlwaysSample` twice. (#325)
+- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test. (#325)
+- Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes`. (#325)
+- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint.
+   This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly.
+   This was corrected. (#333)
+
+## [0.1.2] - 2019-11-18
+
+### Fixed
+
+- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328)
+- Removed unnecessary unslicing of parameters that are already a slice. (#324)
+
+## [0.1.1] - 2019-11-18
+
+This release contains a Metrics SDK with stdout exporter and supports basic aggregations such as counter, gauges, array, maxsumcount, and ddsketch.
+
+### Added
+
+- Metrics stdout export pipeline. (#265)
+- Array aggregation for raw measure metrics. (#282)
+- The `core.Value` type now has a `MarshalJSON` method. (#281)
+
+### Removed
+
+- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314)
+- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292)
+
+### Changed
+
+- Allocation in LabelSet construction to reduce GC overhead. (#318)
+- `trace.WithAttributes` to append values instead of replacing. (#315)
+- Use a formula for tolerance in sampling tests. (#298)
+- Move export types into trace and metric-specific sub-directories. (#289)
+- `SpanKind` back to being based on an `int` type. (#288)
+
+### Fixed
+
+- URL to OpenTelemetry website in README. (#323)
+- Name of othttp default tracer. (#321)
+- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294)
+- CI modules cache to correctly restore/save from/to the cache. (#316)
+- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293)
+- README now reflects the new code structure introduced with these changes. (#291)
+- Make the basic example work. (#279)
+
+## [0.1.0] - 2019-11-04
+
+This is the first release of the OpenTelemetry Go library.
+It contains the API and SDK for trace and meter.
+
+### Added
+
+- Initial OpenTelemetry trace and metric API prototypes.
+- Initial OpenTelemetry trace, metric, and export SDK packages.
+- A wireframe bridge to support compatibility with OpenTracing.
+- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup.
+- Exporters for Jaeger, Stackdriver, and stdout.
+- Propagators for binary, B3, and trace-context protocols.
+- Project information and guidelines in the form of a README and CONTRIBUTING.
+- Tools to build the project and a Makefile to automate the process.
+- Apache-2.0 license.
+- CircleCI build CI manifest files.
+- CODEOWNERS file to track owners of this project.
+
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.23.1...HEAD
+[1.23.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.1
+[1.23.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0
+[1.23.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0-rc.1
+[1.22.0/0.45.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.22.0
+[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0
+[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0
+[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0
+[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1
+[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0
+[1.17.0/0.40.0/0.0.5]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.17.0
+[1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0
+[1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1
+[1.15.1/0.38.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1
+[1.15.0/0.38.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0
+[1.15.0-rc.2/0.38.0-rc.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.2
+[1.15.0-rc.1/0.38.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.1
+[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0
+[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0
+[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0
+[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2
+[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1
+[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0
+[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2
+[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1
+[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0
+[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0
+[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0
+[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0
+[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0
+[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0
+[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3
+[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2
+[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1
+[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0
+[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0
+[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1
+[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0
+[1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0
+[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0
+[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0
+[1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1
+[Metrics 0.24.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0
+[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0
+[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3
+[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2
+[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0
+[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1
+[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0
+[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0
+[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0
+[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0
+[0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0
+[0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0
+[0.14.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0
+[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0
+[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0
+[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0
+[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0
+[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0
+[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0
+[0.7.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0
+[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0
+[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0
+[0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3
+[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2
+[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1
+[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0
+[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0
+[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3
+[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2
+[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1
+[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1
+[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0
+[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2
+[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
+[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0
+
+[Go 1.20]: https://go.dev/doc/go1.20
+[Go 1.19]: https://go.dev/doc/go1.19
+[Go 1.18]: https://go.dev/doc/go1.18
+
+[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric
+[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric
+[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace
diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS
new file mode 100644
index 0000000000000000000000000000000000000000..31d336d92229cd952f1f76065c8ece4d18934329
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS
@@ -0,0 +1,17 @@
+#####################################################
+#
+# List of approvers for this repository
+#
+#####################################################
+#
+# Learn about membership in OpenTelemetry community:
+#  https://github.com/open-telemetry/community/blob/main/community-membership.md
+#
+#
+# Learn about CODEOWNERS file format:
+#  https://help.github.com/en/articles/about-code-owners
+#
+
+* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
+
+CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..c9f2bac55bf6dbcc25ce0816c6fd4ccf0db47182
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -0,0 +1,645 @@
+# Contributing to opentelemetry-go
+
+The Go special interest group (SIG) meets regularly. See the
+OpenTelemetry
+[community](https://github.com/open-telemetry/community#golang-sdk)
+repo for information on this and other language SIGs.
+
+See the [public meeting
+notes](https://docs.google.com/document/d/1E5e7Ld0NuU1iVvf-42tOBpu2VBBLYnh73GJuITGJTTU/edit)
+for a summary description of past meetings. To request edit access,
+join the meeting or get in touch on
+[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT).
+
+## Development
+
+You can view and edit the source code by cloning this repository:
+
+```sh
+git clone https://github.com/open-telemetry/opentelemetry-go.git
+```
+
+Run `make test` instead of `go test` to run the tests.
+
+There are some generated files checked into the repo. To make sure
+that the generated files are up-to-date, run `make` (or `make
+precommit` - the `precommit` target is the default).
+
+The `precommit` target also fixes the formatting of the code and
+checks the status of the go module files.
+
+Additionally, there is a `codespell` target that checks for common
+typos in the code. It is not run by default, but you can run it
+manually with `make codespell`. It will set up a virtual environment
+in `venv` and install `codespell` there.
+
+If, after running `make precommit`, the output of `git status` contains
+`nothing to commit, working tree clean`, then everything is up-to-date
+and properly formatted.
+
+## Pull Requests
+
+### How to Send Pull Requests
+
+Everyone is welcome to contribute code to `opentelemetry-go` via
+GitHub pull requests (PRs).
+
+To create a new PR, fork the project in GitHub and clone the upstream
+repo:
+
+```sh
+go get -d go.opentelemetry.io/otel
+```
+
+(This may print some warning about "build constraints exclude all Go
+files"; just ignore it.)
+
+This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You
+can alternatively use `git` directly with:
+
+```sh
+git clone https://github.com/open-telemetry/opentelemetry-go
+```
+
+(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name -
+that name is a kind of redirector to GitHub that `go get` can
+understand, but `git` does not.)
+
+This puts the project in the `opentelemetry-go` directory in the
+current working directory.
+
+Enter the newly created directory and add your fork as a new remote:
+
+```sh
+git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
+```
+
+Check out a new branch, make modifications, run linters and tests, update
+`CHANGELOG.md`, and push the branch to your fork:
+
+```sh
+git checkout -b <YOUR_BRANCH_NAME>
+# edit files
+# update changelog
+make precommit
+git add -p
+git commit
+git push <YOUR_FORK> <YOUR_BRANCH_NAME>
+```
+
+Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull
+request ID to the entry you added to `CHANGELOG.md`.
+
+Avoid rebasing and force-pushing to your branch to facilitate reviewing the pull request.
+Rewriting Git history makes it difficult to keep track of iterations during code review.
+All pull requests are squashed to a single commit upon merge to `main`.
+
+### How to Receive Comments
+
+* If the PR is not ready for review, please put `[WIP]` in the title,
+  tag it as `work-in-progress`, or mark it as
+  [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/).
+* Make sure CLA is signed and CI is clear.
+
+### How to Get PRs Merged
+
+A PR is considered **ready to merge** when:
+
+* It has received two qualified approvals[^1].
+
+  This is not enforced through automation, but needs to be validated by the
+  maintainer merging.
+  * The qualified approvals need to be from [Approver]s/[Maintainer]s
+    affiliated with different companies. Two qualified approvals from
+    [Approver]s or [Maintainer]s affiliated with the same company count as a
+    single qualified approval.
+  * PRs introducing changes that have already been discussed, with consensus
+    reached, only need one qualified approval. The discussion and resolution
+    need to be linked to the PR.
+  * Trivial changes[^2] only need one qualified approval.
+
+* All feedback has been addressed.
+  * All PR comments and suggestions are resolved.
+  * All GitHub Pull Request reviews with a status of "Request changes" have
+    been addressed. Another review by the objecting reviewer with a different
+    status can be submitted to clear the original review, or the review can be
+    dismissed by a [Maintainer] when the issues from the original review have
+    been addressed.
+  * Any comments or reviews that cannot be resolved between the PR author and
+    reviewers can be submitted to the community [Approver]s and [Maintainer]s
+    during the weekly SIG meeting. If consensus is reached among the
+    [Approver]s and [Maintainer]s during the SIG meeting, the objections to the
+    PR may be dismissed or resolved, or the PR closed by a [Maintainer].
+  * Any substantive changes to the PR require existing Approval reviews be
+    cleared unless the approver explicitly states that their approval persists
+    across changes. This includes changes resulting from other feedback.
+    [Approver]s and [Maintainer]s can help in clearing reviews and they should
+    be consulted if there are any questions.
+
+* The PR branch is up to date with the base branch it is merging into.
+  * To ensure this does not block the PR, it should be configured to allow
+    maintainers to update it.
+
+* It has been open for review for at least one working day. This gives people
+  reasonable time to review.
+  * Trivial changes[^2] do not have to wait for one day and may be merged with
+    a single [Maintainer]'s approval.
+
+* All required GitHub workflows have succeeded.
+- An urgent fix can take exception as long as it has been actively communicated
+  among [Maintainer]s.
+
+Any [Maintainer] can merge the PR once the above criteria have been met.
+
+[^1]: A qualified approval is a GitHub Pull Request review with "Approve"
+  status from an OpenTelemetry Go [Approver] or [Maintainer].
+[^2]: Trivial changes include: typo corrections, cosmetic non-substantive
+  changes, documentation corrections or updates, dependency updates, etc.
+
+## Design Choices
+
+As with other OpenTelemetry clients, opentelemetry-go follows the
+[OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel).
+
+It's especially valuable to read through the [library
+guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines).
+
+### Focus on Capabilities, Not Structure Compliance
+
+OpenTelemetry is an evolving specification, one where the desires and
+use cases are clear, but the methods to satisfy those use cases are
+not.
+
+As such, contributions should provide functionality and behavior that
+conform to the specification, while the interface and structure remain
+flexible.
+
+It is preferable to have contributions follow the idioms of the
+language rather than conform to specific API names or argument
+patterns in the spec.
+
+For a deeper discussion, see
+[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165).
+
+## Documentation
+
+Each (non-internal, non-test) package must be documented using
+[Go Doc Comments](https://go.dev/doc/comment),
+preferably in a `doc.go` file.
+
+Prefer using [Examples](https://pkg.go.dev/testing#hdr-Examples)
+instead of putting code snippets in Go doc comments.
+In some cases, you can even create [Testable Examples](https://go.dev/blog/examples).
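+
+For instance, a minimal testable example looks like the following sketch
+(the package and function names are illustrative, not taken from this repository):
+
+```go
+package greet_test
+
+import "fmt"
+
+// ExampleHello is rendered on pkg.go.dev and verified by `go test`
+// against the Output comment below.
+func ExampleHello() {
+	fmt.Println("hello")
+	// Output: hello
+}
+```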
+
+You can install and run a "local Go Doc site" in the following way:
+
+  ```sh
+  go install golang.org/x/pkgsite/cmd/pkgsite@latest
+  pkgsite
+  ```
+
+[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
+is an example of a very well-documented package.
+
+## Style Guide
+
+One of the primary goals of this project is that it is actually used by
+developers. With this goal in mind the project strives to build
+user-friendly and idiomatic Go code adhering to the Go community's best
+practices.
+
+For a non-comprehensive but foundational overview of these best practices
+the [Effective Go](https://golang.org/doc/effective_go.html) documentation
+is an excellent starting place.
+
+As a convenience for developers building this project, the `make precommit`
+target will format, lint, validate, and in some cases fix the changes you plan
+to submit. This check must pass for your changes to be merged.
+
+In addition to idiomatic Go, the project has adopted certain standards for
+implementations of common patterns. These standards should be followed as a
+default, and if they are not followed, documentation explaining the reasons
+why needs to be included.
+
+### Configuration
+
+When creating an instantiation function for a complex `type T struct`, it is
+useful to allow a variable number of options to be applied. However, Go's
+strong type system restricts the design options for such functions. There are
+a few ways to solve this problem, but we have landed on the following design.
+
+#### `config`
+
+Configuration should be held in a `struct` named `config`, or prefixed with
+the specific type name this configuration applies to if there are multiple
+`config` types in the package. This type must contain the configuration options.
+
+```go
+// config contains configuration options for a thing.
+type config struct {
+	// options ...
+}
+```
+
+In general the `config` type will not need to be used externally to the
+package and should be unexported. If, however, it is expected that the user
+will likely want to build custom options for the configuration, the `config`
+should be exported. Please, include in the documentation for the `config`
+how the user can extend the configuration.
+
+It is important that internal `config` types are not shared across package
+boundaries, meaning a `config` from one package should not be directly used by
+another. The one exception is the API packages. The configs from the base API,
+e.g. `go.opentelemetry.io/otel/trace.TracerConfig` and
+`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed
+by the SDK and are therefore expected to be exported.
+
+When a config is exported we want to maintain forward and backward
+compatibility. To achieve this, no fields should be exported; they should
+instead be accessed through methods.
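+
+For example, a sketch of an exported config following this rule (the
+`ExporterConfig` type and its `endpoint` field are hypothetical):
+
+```go
+// ExporterConfig is a hypothetical exported config.
+type ExporterConfig struct {
+	endpoint string // unexported; read through the accessor below
+}
+
+// Endpoint returns the configured endpoint.
+func (c ExporterConfig) Endpoint() string { return c.endpoint }
+```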
+
+Optionally, it is common to include a `newConfig` function (with the same
+naming scheme). This function sets any defaults and loops over all options
+to create a configured `config`.
+
+```go
+// newConfig returns an appropriately configured config.
+func newConfig(options ...Option) config {
+	// Set default values for config.
+	config := config{/* […] */}
+	for _, option := range options {
+		config = option.apply(config)
+	}
+	// Perform any validation here.
+	return config
+}
+```
+
+If validation of the `config` options is also performed, this function can
+additionally return an error that is expected to be handled by the
+instantiation function or propagated to the user.
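+
+A sketch of such a validating `newConfig`, assuming the `config` and `Option`
+types above and the standard `errors` package (the `endpoint` field and its
+validation rule are hypothetical):
+
+```go
+// newConfig returns a configured config, or an error if validation fails.
+func newConfig(options ...Option) (config, error) {
+	c := config{endpoint: "localhost:4317"} // hypothetical default
+	for _, option := range options {
+		c = option.apply(c)
+	}
+	if c.endpoint == "" { // hypothetical validation rule
+		return c, errors.New("endpoint must not be empty")
+	}
+	return c, nil
+}
+```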
+
+Given the design goal of not having the user need to work with the `config`,
+the `newConfig` function should also be unexported.
+
+#### `Option`
+
+To set the value of the options a `config` contains, a corresponding
+`Option` interface type should be used.
+
+```go
+type Option interface {
+	apply(config) config
+}
+```
+
+Having `apply` unexported makes sure that it will not be used externally.
+Moreover, the interface becomes sealed so users cannot easily implement
+the interface on their own.
+
+The `apply` method should return a modified version of the passed config.
+This approach, instead of passing a pointer, is used to prevent the config from being allocated to the heap.
+
+The name of the interface should be prefixed in the same way the
+corresponding `config` is (if at all).
+
+#### Options
+
+All user-configurable options for a `config` must have a related unexported
+implementation of the `Option` interface and an exported configuration
+function that wraps this implementation.
+
+The wrapping function name should be prefixed with `With*` (or, in the
+special case of a boolean option, `Without*`) and should have the following
+function signature.
+
+```go
+func With*(…) Option { … }
+```
+
+##### `bool` Options
+
+```go
+type defaultFalseOption bool
+
+func (o defaultFalseOption) apply(c config) config {
+	c.Bool = bool(o)
+	return c
+}
+
+// WithOption sets a T to have the Bool option included.
+func WithOption() Option {
+	return defaultFalseOption(true)
+}
+```
+
+```go
+type defaultTrueOption bool
+
+func (o defaultTrueOption) apply(c config) config {
+	c.Bool = bool(o)
+	return c
+}
+
+// WithoutOption sets a T to have the Bool option excluded.
+func WithoutOption() Option {
+	return defaultTrueOption(false)
+}
+```
+
+##### Declared Type Options
+
+```go
+type myTypeOption struct {
+	MyType MyType
+}
+
+func (o myTypeOption) apply(c config) config {
+	c.MyType = o.MyType
+	return c
+}
+
+// WithMyType sets T to include MyType.
+func WithMyType(t MyType) Option {
+	return myTypeOption{t}
+}
+```
+
+##### Functional Options
+
+```go
+type optionFunc func(config) config
+
+func (fn optionFunc) apply(c config) config {
+	return fn(c)
+}
+
+// WithMyType sets t as MyType.
+func WithMyType(t MyType) Option {
+	return optionFunc(func(c config) config {
+		c.MyType = t
+		return c
+	})
+}
+```
+
+#### Instantiation
+
+Use this configuration pattern to configure instantiation with a `NewT`
+function.
+
+```go
+func NewT(options ...Option) T {…}
+```
+
+Any required parameters can be declared before the variadic `options`.
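+
+For example, a sketch with a hypothetical required `name` parameter:
+
+```go
+// NewT returns a new T identified by the required name and configured
+// with the provided options.
+func NewT(name string, options ...Option) T {
+	c := newConfig(options...)
+	return T{name: name, cfg: c} // hypothetical fields
+}
+```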
+
+#### Dealing with Overlap
+
+Sometimes multiple complex `struct` types share common configuration and
+also have distinct configuration. To avoid repeating portions of `config`s,
+a common `config` can be used, with the union of options handled through the
+`Option` interfaces.
+
+For example:
+
+```go
+// config holds options for all animals.
+type config struct {
+	Weight      float64
+	Color       string
+	MaxAltitude float64
+}
+
+// DogOption applies Dog-specific options.
+type DogOption interface {
+	applyDog(config) config
+}
+
+// BirdOption applies Bird-specific options.
+type BirdOption interface {
+	applyBird(config) config
+}
+
+// Option applies options for all animals.
+type Option interface {
+	BirdOption
+	DogOption
+}
+
+type weightOption float64
+
+func (o weightOption) applyDog(c config) config {
+	c.Weight = float64(o)
+	return c
+}
+
+func (o weightOption) applyBird(c config) config {
+	c.Weight = float64(o)
+	return c
+}
+
+func WithWeight(w float64) Option { return weightOption(w) }
+
+type furColorOption string
+
+func (o furColorOption) applyDog(c config) config {
+	c.Color = string(o)
+	return c
+}
+
+func WithFurColor(c string) DogOption { return furColorOption(c) }
+
+type maxAltitudeOption float64
+
+func (o maxAltitudeOption) applyBird(c config) config {
+	c.MaxAltitude = float64(o)
+	return c
+}
+
+func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) }
+
+func NewDog(name string, o ...DogOption) Dog    {…}
+func NewBird(name string, o ...BirdOption) Bird {…}
+```
+
+### Interfaces
+
+To allow other developers to better comprehend the code, it is important
+to ensure it is sufficiently documented. One simple measure that contributes
+to this aim is self-documenting code achieved by naming method parameters.
+Therefore, where appropriate, methods of every exported interface type
+should have their parameters appropriately named.
+
+#### Interface Stability
+
+All exported stable interfaces that include the following warning in their
+documentation are allowed to be extended with additional methods.
+
+> Warning: methods may be added to this interface in minor releases.
+
+These interfaces are defined by the OpenTelemetry specification and will be
+updated as the specification evolves.
+
+Otherwise, stable interfaces MUST NOT be modified.
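+
+For interfaces that do carry the warning, it appears in the doc comment
+like this (a sketch; the `Extender` interface is hypothetical):
+
+```go
+// Extender extends telemetry behavior.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Extender interface {
+	// …
+}
+```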
+
+#### How to Change Specification Interfaces
+
+When an API change must be made, we will update the SDK with the new method one
+release before the API change. This will allow the SDK one version before the
+API change to work seamlessly with the new API.
+
+If an incompatible version of the SDK is used with the new API the application
+will fail to compile.
+
+#### How Not to Change Specification Interfaces
+
+We have explored using a v2 of the API to change interfaces and found that there
+was no way to introduce a v2 and have it work seamlessly with the v1 of the API.
+Problems arose with libraries that upgraded to v2 while an application did not;
+such a mismatch would not produce any telemetry.
+
+More detail of the approaches considered and their limitations can be found in
+the [Use a V2 API to evolve interfaces](https://github.com/open-telemetry/opentelemetry-go/issues/3920)
+issue.
+
+#### How to Change Other Interfaces
+
+If new functionality is needed for an interface that cannot be changed it MUST
+be added by including an additional interface. That added interface can be a
+simple interface for the specific functionality that you want to add or it can
+be a super-set of the original interface. For example, if you wanted to add a
+`Close` method to the `Exporter` interface:
+
+```go
+type Exporter interface {
+	Export()
+}
+```
+
+A new interface, `Closer`, can be added:
+
+```go
+type Closer interface {
+	Close()
+}
+```
+
+Code that is passed the `Exporter` interface can now check to see if the passed
+value also satisfies the new interface. E.g.
+
+```go
+func caller(e Exporter) {
+	/* ... */
+	if c, ok := e.(Closer); ok {
+		c.Close()
+	}
+	/* ... */
+}
+```
+
+Alternatively, a new type that is the super-set of an `Exporter` can be created.
+
+```go
+type ClosingExporter struct {
+	Exporter
+}
+
+// Close releases any resources held by the ClosingExporter.
+func (e *ClosingExporter) Close() { /* ... */ }
+```
+
+This new type can be used similarly to the simple interface above in that a
+passed `Exporter` value can be type-asserted to a `*ClosingExporter` and the
+`Close` method called.
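+
+A sketch of that usage, mirroring the `Closer` example above and assuming the
+`ClosingExporter` shown here:
+
+```go
+func caller(e Exporter) {
+	/* ... */
+	if ce, ok := e.(*ClosingExporter); ok {
+		ce.Close()
+	}
+	/* ... */
+}
+```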
+
+This super-set approach can be useful if there is explicit behavior that needs
+to be coupled with the original type and passed as a unified type to a new
+function, but, because of this coupling, it also limits the applicability of
+the added functionality. If there exist other interfaces where this
+functionality should be added, each one will need its own super-set
+interface, duplicating the pattern. For this reason, the simple targeted
+interface that defines the specific functionality should be preferred.
+
+### Testing
+
+The tests should never leak goroutines.
+
+Use the term `ConcurrentSafe` in the test name when it aims to verify the
+absence of race conditions.
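+
+A minimal sketch of such a test (the `NewT` constructor and `Do` method are
+hypothetical); run under the race checker, e.g. via `make test-default`, it
+exercises concurrent use and, by joining every goroutine, leaks none:
+
+```go
+func TestTConcurrentSafe(t *testing.T) {
+	thing := NewT()
+	var wg sync.WaitGroup
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			thing.Do() // hypothetical concurrently-used method
+		}()
+	}
+	wg.Wait() // join all goroutines so none leak
+}
+```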
+
+### Internal packages
+
+The use of internal packages should be scoped to a single module. A sub-module
+should never import from a parent internal package. This creates a coupling
+between the two modules: a user can upgrade the parent without the child, and
+if the internal package API has changed, the upgrade will fail[^3].
+
+There are two known exceptions to this rule:
+
+- `go.opentelemetry.io/otel/internal/global`
+  - This package manages global state for all of opentelemetry-go. It needs to
+  be a single package in order to ensure the uniqueness of the global state.
+- `go.opentelemetry.io/otel/internal/baggage`
+  - This package provides values in a `context.Context` that need to be
+  recognized by `go.opentelemetry.io/otel/baggage` and
+  `go.opentelemetry.io/otel/bridge/opentracing` but remain private.
+
+If you have duplicate code in multiple modules, make that code into a Go
+template stored in `go.opentelemetry.io/otel/internal/shared` and use [gotmpl]
+to render the templates in the desired locations. See [#4404] for an example of
+this.
+
+[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548
+
+### Ignoring context cancellation
+
+OpenTelemetry API implementations need to ignore cancellation of the contexts that are
+passed when recording a value (e.g. starting a span, recording a measurement, emitting a log).
+Recording methods should not return an error describing the cancellation state of the context
+when they complete, nor should they abort any work.
+
+This rule may not apply if the OpenTelemetry specification defines a timeout mechanism for
+the method. In that case the context cancellation can be used for the timeout with the
+restriction that this behavior is documented for the method. Otherwise, timeouts
+are expected to be handled by the user calling the API, not the implementation.
+
+Stoppage of the telemetry pipeline is handled by calling the appropriate `Shutdown` method
+of a provider. It is assumed the context passed from a user is not used for this purpose.
+
+Outside of the direct recording of telemetry from the API (e.g. exporting telemetry,
+force flushing telemetry, shutting down a signal provider) the context cancellation
+should be honored. This means all work done on behalf of the user provided context
+should be canceled.
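+
+A sketch contrasting the two cases (the `recorder` and `provider` types, and
+the `flushed` helper, are hypothetical):
+
+```go
+// Record ignores cancellation of ctx: it neither aborts nor reports it.
+func (r *recorder) Record(ctx context.Context, v float64) {
+	r.store(v) // proceed even if ctx is already canceled
+}
+
+// Shutdown is not a recording method, so it honors ctx cancellation.
+func (p *provider) Shutdown(ctx context.Context) error {
+	select {
+	case <-ctx.Done():
+		return ctx.Err() // cancel work done on behalf of the user's context
+	case <-p.flushed():
+		return nil
+	}
+}
+```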
+
+## Approvers and Maintainers
+
+### Approvers
+
+- [Evan Torrie](https://github.com/evantorrie), Verizon Media
+- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
+- [Chester Cheung](https://github.com/hanyuancheung), Tencent
+- [Damien Mathieu](https://github.com/dmathieu), Elastic
+- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
+
+### Maintainers
+
+- [David Ashpole](https://github.com/dashpole), Google
+- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
+- [Robert Pająk](https://github.com/pellared), Splunk
+- [Tyler Yahn](https://github.com/MrAlias), Splunk
+
+### Emeritus
+
+- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
+- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
+- [Josh MacDonald](https://github.com/jmacd), LightStep
+
+### Become an Approver or a Maintainer
+
+See the [community membership document in OpenTelemetry community
+repo](https://github.com/open-telemetry/community/blob/main/community-membership.md).
+
+[Approver]: #approvers
+[Maintainer]: #maintainers
+[gotmpl]: https://pkg.go.dev/go.opentelemetry.io/build-tools/gotmpl
+[#4404]: https://github.com/open-telemetry/opentelemetry-go/pull/4404
diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..35fc189961b69950ed6cbe2db982654cd9d24379
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -0,0 +1,318 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+TOOLS_MOD_DIR := ./internal/tools
+
+ALL_DOCS := $(shell find . -name '*.md' -type f | sort)
+ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
+OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS))
+ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort)
+
+GO = go
+TIMEOUT = 60
+
+.DEFAULT_GOAL := precommit
+
+.PHONY: precommit ci
+precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default
+ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage
+
+# Tools
+
+TOOLS = $(CURDIR)/.tools
+
+$(TOOLS):
+	@mkdir -p $@
+$(TOOLS)/%: | $(TOOLS)
+	cd $(TOOLS_MOD_DIR) && \
+	$(GO) build -o $@ $(PACKAGE)
+
+MULTIMOD = $(TOOLS)/multimod
+$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod
+
+SEMCONVGEN = $(TOOLS)/semconvgen
+$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen
+
+CROSSLINK = $(TOOLS)/crosslink
+$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink
+
+SEMCONVKIT = $(TOOLS)/semconvkit
+$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit
+
+DBOTCONF = $(TOOLS)/dbotconf
+$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf
+
+GOLANGCI_LINT = $(TOOLS)/golangci-lint
+$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint
+
+MISSPELL = $(TOOLS)/misspell
+$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell
+
+GOCOVMERGE = $(TOOLS)/gocovmerge
+$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge
+
+STRINGER = $(TOOLS)/stringer
+$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer
+
+PORTO = $(TOOLS)/porto
+$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto
+
+GOJQ = $(TOOLS)/gojq
+$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq
+
+GOTMPL = $(TOOLS)/gotmpl
+$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl
+
+GORELEASE = $(TOOLS)/gorelease
+$(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease
+
+GOVULNCHECK = $(TOOLS)/govulncheck
+$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
+
+.PHONY: tools
+tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
+
+# Virtualized python tools via docker
+
+# The directory where the virtual environment is created.
+VENVDIR := venv
+
+# The directory where the python tools are installed.
+PYTOOLS := $(VENVDIR)/bin
+
+# The pip executable in the virtual environment.
+PIP := $(PYTOOLS)/pip
+
+# The directory in the docker image where the current directory is mounted.
+WORKDIR := /workdir
+
+# The python image to use for the virtual environment.
+PYTHONIMAGE := python:3.11.3-slim-bullseye
+
+# Run the python image with the current directory mounted.
+DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE)
+
+# Create a virtual environment for Python tools.
+$(PYTOOLS):
+# The `--upgrade` flag is needed to ensure that the virtual environment is
+# created with the latest pip version.
+	@$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip"
+
+# Install python packages into the virtual environment.
+$(PYTOOLS)/%: | $(PYTOOLS)
+	@$(DOCKERPY) $(PIP) install -r requirements.txt
+
+CODESPELL = $(PYTOOLS)/codespell
+$(CODESPELL): PACKAGE=codespell
+
+# Generate
+
+.PHONY: generate
+generate: go-generate vanity-import-fix
+
+.PHONY: go-generate
+go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%)
+go-generate/%: DIR=$*
+go-generate/%: | $(STRINGER) $(GOTMPL)
+	@echo "$(GO) generate $(DIR)/..." \
+		&& cd $(DIR) \
+		&& PATH="$(TOOLS):$${PATH}" $(GO) generate ./...
+
+.PHONY: vanity-import-fix
+vanity-import-fix: | $(PORTO)
+	@$(PORTO) --include-internal -w .
+
+# Generate go.work file for local development.
+.PHONY: go-work
+go-work: | $(CROSSLINK)
+	$(CROSSLINK) work --root=$(shell pwd)
+
+# Build
+
+.PHONY: build
+
+build: $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%)
+build/%: DIR=$*
+build/%:
+	@echo "$(GO) build $(DIR)/..." \
+		&& cd $(DIR) \
+		&& $(GO) build ./...
+
+build-tests/%: DIR=$*
+build-tests/%:
+	@echo "$(GO) build tests $(DIR)/..." \
+		&& cd $(DIR) \
+		&& $(GO) list ./... \
+		| grep -v third_party \
+		| xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null
+
+# Tests
+
+TEST_TARGETS := test-default test-bench test-short test-verbose test-race
+.PHONY: $(TEST_TARGETS) test
+test-default test-race: ARGS=-race
+test-bench:   ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
+test-short:   ARGS=-short
+test-verbose: ARGS=-v -race
+$(TEST_TARGETS): test
+test: $(OTEL_GO_MOD_DIRS:%=test/%)
+test/%: DIR=$*
+test/%:
+	@echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \
+		&& cd $(DIR) \
+		&& $(GO) list ./... \
+		| grep -v third_party \
+		| xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS)
+
+COVERAGE_MODE    = atomic
+COVERAGE_PROFILE = coverage.out
+.PHONY: test-coverage
+test-coverage: | $(GOCOVMERGE)
+	@set -e; \
+	printf "" > coverage.txt; \
+	for dir in $(ALL_COVERAGE_MOD_DIRS); do \
+	  echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \
+	  (cd "$${dir}" && \
+	    $(GO) list ./... \
+	    | grep -v third_party \
+	    | grep -v 'semconv/v.*' \
+	    | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \
+	  $(GO) tool cover -html=coverage.out -o coverage.html); \
+	done; \
+	$(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
+
+# Adding a directory will include all benchmarks in that directory if a filter is not specified.
+BENCHMARK_TARGETS := sdk/trace
+.PHONY: benchmark
+benchmark: $(BENCHMARK_TARGETS:%=benchmark/%)
+BENCHMARK_FILTER = .
+# You can override the filter for a particular directory by adding a rule here.
+benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample
+benchmark/%:
+	@echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \
+		&& cd $* \
+		$(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter))
+
+.PHONY: golangci-lint golangci-lint-fix
+golangci-lint-fix: ARGS=--fix
+golangci-lint-fix: golangci-lint
+golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%)
+golangci-lint/%: DIR=$*
+golangci-lint/%: | $(GOLANGCI_LINT)
+	@echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \
+		&& cd $(DIR) \
+		&& $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS)
+
+.PHONY: crosslink
+crosslink: | $(CROSSLINK)
+	@echo "Updating intra-repository dependencies in all go modules" \
+		&& $(CROSSLINK) --root=$(shell pwd) --prune
+
+.PHONY: go-mod-tidy
+go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%)
+go-mod-tidy/%: DIR=$*
+go-mod-tidy/%: | crosslink
+	@echo "$(GO) mod tidy in $(DIR)" \
+		&& cd $(DIR) \
+		&& $(GO) mod tidy -compat=1.20
+
+.PHONY: lint-modules
+lint-modules: go-mod-tidy
+
+.PHONY: lint
+lint: misspell lint-modules golangci-lint govulncheck
+
+.PHONY: vanity-import-check
+vanity-import-check: | $(PORTO)
+	@$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 )
+
+.PHONY: misspell
+misspell: | $(MISSPELL)
+	@$(MISSPELL) -w $(ALL_DOCS)
+
+.PHONY: govulncheck
+govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%)
+govulncheck/%: DIR=$*
+govulncheck/%: | $(GOVULNCHECK)
+	@echo "govulncheck ./... in $(DIR)" \
+		&& cd $(DIR) \
+		&& $(GOVULNCHECK) ./...
+
+.PHONY: codespell
+codespell: | $(CODESPELL)
+	@$(DOCKERPY) $(CODESPELL)
+
+.PHONY: license-check
+license-check:
+	@licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \
+	           awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=4 { found=1; next } END { if (!found) print FILENAME }' $$f; \
+	   done); \
+	   if [ -n "$${licRes}" ]; then \
+	           echo "license header checking failed:"; echo "$${licRes}"; \
+	           exit 1; \
+	   fi
+
+DEPENDABOT_CONFIG = .github/dependabot.yml
+.PHONY: dependabot-check
+dependabot-check: | $(DBOTCONF)
+	@$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 )
+
+.PHONY: dependabot-generate
+dependabot-generate: | $(DBOTCONF)
+	@$(DBOTCONF) generate > $(DEPENDABOT_CONFIG)
+
+.PHONY: check-clean-work-tree
+check-clean-work-tree:
+	@if ! git diff --quiet; then \
+	  echo; \
+	  echo 'Working tree is not clean, did you forget to run "make precommit"?'; \
+	  echo; \
+	  git status; \
+	  exit 1; \
+	fi
+
+SEMCONVPKG ?= "semconv/"
+.PHONY: semconv-generate
+semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT)
+	[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 )
+	[ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 )
+	$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+	$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+	$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+	$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+	$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
+
+.PHONY: gorelease
+gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%)
+gorelease/%: DIR=$*
+gorelease/%:| $(GORELEASE)
+	@echo "gorelease in $(DIR):" \
+		&& cd $(DIR) \
+		&& $(GORELEASE) \
+		|| echo ""
+
+.PHONY: prerelease
+prerelease: | $(MULTIMOD)
+	@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
+	$(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET}
+
+COMMIT ?= "HEAD"
+.PHONY: add-tags
+add-tags: | $(MULTIMOD)
+	@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
+	$(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT}
+
+.PHONY: lint-markdown
+lint-markdown:
+	docker run -v "$(CURDIR):$(WORKDIR)" docker://avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..44e1bfc9b5e4c11d09a73bf64b640560a657f42b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -0,0 +1,108 @@
+# OpenTelemetry-Go
+
+[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain)
+[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main)
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel)
+[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel)
+[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT)
+
+OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/).
+It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms.
+
+## Project Status
+
+| Signal  | Status     |
+|---------|------------|
+| Traces  | Stable     |
+| Metrics | Stable     |
+| Logs    | Design [1] |
+
+- [1]: Currently, the logs signal development is in a design phase ([#4696](https://github.com/open-telemetry/opentelemetry-go/issues/4696)).
+   No Logs Pull Requests are currently being accepted.
+
+Progress and status specific to this repository is tracked in our
+[project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
+and
+[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones).
+
+Project versioning information and stability guarantees can be found in the
+[versioning documentation](VERSIONING.md).
+
+### Compatibility
+
+OpenTelemetry-Go ensures compatibility with the current supported versions of
+the [Go language](https://golang.org/doc/devel/release#policy):
+
+> Each major Go release is supported until there are two newer major releases.
+> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release.
+
+For versions of Go that are no longer supported upstream, opentelemetry-go will
+stop ensuring compatibility with these versions in the following manner:
+
+- A minor release of opentelemetry-go will be made to add support for the new
+  supported release of Go.
+- The following minor release of opentelemetry-go will remove compatibility
+  testing for the oldest (now archived upstream) version of Go. This, and
+  future, releases of opentelemetry-go may include features only supported by
+  the currently supported versions of Go.
+
+Currently, this project supports the following environments.
+
+| OS      | Go Version | Architecture |
+|---------|------------|--------------|
+| Ubuntu  | 1.21       | amd64        |
+| Ubuntu  | 1.20       | amd64        |
+| Ubuntu  | 1.21       | 386          |
+| Ubuntu  | 1.20       | 386          |
+| MacOS   | 1.21       | amd64        |
+| MacOS   | 1.20       | amd64        |
+| Windows | 1.21       | amd64        |
+| Windows | 1.20       | amd64        |
+| Windows | 1.21       | 386          |
+| Windows | 1.20       | 386          |
+
+While this project should work for other systems, no compatibility guarantees
+are made for those systems currently.
+
+## Getting Started
+
+You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/languages/go/getting-started/).
+
+OpenTelemetry's goal is to provide a single set of APIs to capture distributed
+traces and metrics from your application and send them to an observability
+platform. This project allows you to do just that for applications written in
+Go. There are two steps to this process: instrument your application, and
+configure an exporter.
+
+### Instrumentation
+
+To start capturing distributed traces and metric events from your application,
+it first needs to be instrumented. The easiest way to do this is by using an
+instrumentation library for your code. Be sure to check out [the officially
+supported instrumentation
+libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation).
+
+If you need to extend the telemetry an instrumentation library provides or want
+to build your own instrumentation for your application directly, you will need
+to use the
+[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel)
+package. The included [examples](./example/) are a good way to see some
+practical uses of this process.
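+
+As a minimal sketch of manual instrumentation with the Go otel package (the
+tracer name "example" and span name "doWork" below are placeholders, not
+conventions from this project):
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+)
+
+func doWork(ctx context.Context) {
+	// Acquire a named tracer from the global provider and start a span.
+	ctx, span := otel.Tracer("example").Start(ctx, "doWork")
+	defer span.End()
+
+	// Describe this unit of work with attributes.
+	span.SetAttributes(attribute.String("work.kind", "demo"))
+	_ = ctx // pass ctx to downstream calls so child spans nest correctly
+}
+```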
+
+### Export
+
+Now that your application is instrumented to collect telemetry, it needs an
+export pipeline to send that telemetry to an observability platform.
+
+All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters).
+
+| Exporter                              | Metrics | Traces |
+|---------------------------------------|:-------:|:------:|
+| [OTLP](./exporters/otlp/)             |    ✓    |   ✓    |
+| [Prometheus](./exporters/prometheus/) |    ✓    |        |
+| [stdout](./exporters/stdout/)         |    ✓    |   ✓    |
+| [Zipkin](./exporters/zipkin/)         |         |   ✓    |
+
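+A hedged sketch of wiring the stdout trace exporter into the SDK (import
+paths follow the table above; error handling is abbreviated):
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+func main() {
+	// Create an exporter that writes spans to stdout.
+	exp, err := stdouttrace.New()
+	if err != nil {
+		panic(err)
+	}
+
+	// Register a provider that batches spans to the exporter.
+	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
+	defer func() { _ = tp.Shutdown(context.Background()) }()
+	otel.SetTracerProvider(tp)
+}
+```
+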
+## Contributing
+
+See the [contributing documentation](CONTRIBUTING.md).
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
new file mode 100644
index 0000000000000000000000000000000000000000..d2691d0bd8b12953c9bea84709dca4a197dba646
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -0,0 +1,139 @@
+# Release Process
+
+## Semantic Convention Generation
+
+New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated.
+The `semconv-generate` make target is used for this.
+
+1. Checkout a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag.
+2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest`
+3. Run the `make semconv-generate ...` target from this repository.
+
+For example,
+
+```sh
+export TAG="v1.21.0" # Change to the release version you are generating.
+export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions"
+docker pull otel/semconvgen:latest
+make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO.
+```
+
+This should create a new sub-package of [`semconv`](./semconv).
+Ensure things look correct before submitting a pull request to include the addition.
+
+## Breaking changes validation
+
+You can run `make gorelease`, which runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes in the public API.
+
+You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
+
+## Pre-Release
+
+First, decide which module sets will be released and update their versions
+in `versions.yaml`.  Commit this change to a new branch.
+
+Updating each submodule's `go.mod` to depend on the new release happens in the next step.
+
+1. Run the `prerelease` make target. It creates a branch
+    `prerelease_<module set>_<new tag>` that will contain all release changes.
+
+    ```
+    make prerelease MODSET=<module set>
+    ```
+
+2. Verify the changes.
+
+    ```
+    git diff ...prerelease_<module set>_<new tag>
+    ```
+
+    This should have changed the version for all modules to be `<new tag>`.
+    If these changes look correct, merge them into your pre-release branch:
+
+    ```
+    git merge prerelease_<module set>_<new tag>
+    ```
+
+3. Update the [Changelog](./CHANGELOG.md).
+   - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
+       To verify this, you can look directly at the commits since the `<last tag>`.
+
+       ```
+       git --no-pager log --pretty=oneline "<last tag>..HEAD"
+       ```
+
+   - Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
+   - Update all the appropriate links at the bottom.
+
+4. Push the changes to upstream and create a Pull Request on GitHub.
+    Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description.
+
+## Tag
+
+Once the Pull Request with all the version changes has been approved and merged it is time to tag the merged commit.
+
+***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step!
+Failure to do so will leave things in a broken state. As long as you do not
+change `versions.yaml` between pre-release and this step, things should be fine.
+
+***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189).
+It is critical you make sure the version you push upstream is correct.
+[Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331).
+
+1. For each module set that will be released, run the `add-tags` make target
+    using the `<commit-hash>` of the commit on the main branch for the merged Pull Request.
+
+    ```
+    make add-tags MODSET=<module set> COMMIT=<commit hash>
+    ```
+
+    It should only be necessary to provide an explicit `COMMIT` value if the
+    current `HEAD` of your working directory is not the correct commit.
+
+2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`).
+    Make sure you push all sub-modules as well.
+
+    ```
+    git push upstream <new tag>
+    git push upstream <submodules-path/new tag>
+    ...
+    ```
+
+## Release
+
+Finally, create a Release for the new `<new tag>` on GitHub.
+The release body should include all the release notes from the Changelog for this release.
+
+## Verify Examples
+
+After releasing, verify that examples build outside of the repository.
+
+```
+./verify_examples.sh
+```
+
+The script copies examples into a different directory, removes any `replace` declarations in `go.mod`, and builds them.
+This ensures they build with the published release, not the local copy.
+
+## Post-Release
+
+### Contrib Repository
+
+Once verified, be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release.
+
+### Website Documentation
+
+Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/languages/go].
+Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
+
+[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions
+[Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/
+[content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go
+
+### Demo Repository
+
+Bump the dependencies in the following Go services:
+
+- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice)
+- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice)
+- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice)
diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md
new file mode 100644
index 0000000000000000000000000000000000000000..412f1e362bbeb56f9bacab824778ff0c52001f9d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md
@@ -0,0 +1,224 @@
+# Versioning
+
+This document describes the versioning policy for this repository. This policy
+is designed so the following goals can be achieved.
+
+**Users are provided a codebase of value that is stable and secure.**
+
+## Policy
+
+* Versioning of this project will be idiomatic of a Go project using [Go
+  modules](https://github.com/golang/go/wiki/Modules).
+  * [Semantic import
+    versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
+    will be used.
+    * Versions will comply with [semver
+      2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions.
+      * New methods may be added to exported API interfaces. All exported
+        interfaces that fall within this exception will include the following
+        paragraph in their public documentation.
+
+        > Warning: methods may be added to this interface in minor releases.
+
+    * If a module is version `v2` or higher, the major version of the module
+      must be included as a `/vN` at the end of the module paths used in
+      `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require
+      go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path
+      (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the
+      paths used in `go get` commands (e.g., `go get
+      go.opentelemetry.io/otel/v2@v2.0.1`.  Note there is both a `/v2` and a
+      `@v2.0.1` in that example. One way to think about it is that the module
+      name now includes the `/v2`, so include `/v2` whenever you are using the
+      module name).
+    * If a module is version `v0` or `v1`, do not include the major version in
+      either the module path or the import path.
+  * Modules will be used to encapsulate signals and components.
+    * Experimental modules still under active development will be versioned at
+      `v0` to imply the stability guarantee defined by
+      [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
+
+      > Major version zero (0.y.z) is for initial development. Anything MAY
+      > change at any time. The public API SHOULD NOT be considered stable.
+
+    * Mature modules for which we guarantee a stable public API will be versioned
+      with a major version greater than `v0`.
+      * The decision to make a module stable will be made on a case-by-case
+        basis by the maintainers of this project.
+    * Experimental modules will start their versioning at `v0.0.0` and will
+      increment their minor version when backwards incompatible changes are
+      released and increment their patch version when backwards compatible
+      changes are released.
+    * All stable modules that use the same major version number will use the
+      same entire version number.
+      * Stable modules may be released with an incremented minor or patch
+        version even though that module has not been changed, but rather so
+        that it will remain at the same version as other stable modules that
+        did undergo change.
+      * When an experimental module becomes stable a new stable module version
+        will be released and will include this now stable module. The new
+        stable module version will be an increment of the minor version number
+        and will be applied to all existing stable modules as well as the newly
+        stable module being released.
+* Versioning of the associated [contrib
+  repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of
+  this project will be idiomatic of a Go project using [Go
+  modules](https://github.com/golang/go/wiki/Modules).
+  * [Semantic import
+    versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
+    will be used.
+    * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html).
+    * If a module is version `v2` or higher, the
+      major version of the module must be included as a `/vN` at the end of the
+      module paths used in `go.mod` files (e.g., `module
+      go.opentelemetry.io/contrib/instrumentation/host/v2`, `require
+      go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the
+      package import path (e.g., `import
+      "go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes
+      the paths used in `go get` commands (e.g., `go get
+      go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`.  Note there
+      is both a `/v2` and a `@v2.0.1` in that example. One way to think about
+      it is that the module name now includes the `/v2`, so include `/v2`
+      whenever you are using the module name).
+    * If a module is version `v0` or `v1`, do not include the major version
+      in either the module path or the import path.
+  * In addition to public APIs, telemetry produced by stable instrumentation
+    will remain stable and backwards compatible. This is to avoid breaking
+    alerts and dashboards.
+  * Modules will be used to encapsulate instrumentation, detectors, exporters,
+    propagators, and any other independent sets of related components.
+    * Experimental modules still under active development will be versioned at
+      `v0` to imply the stability guarantee defined by
+      [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
+
+      > Major version zero (0.y.z) is for initial development. Anything MAY
+      > change at any time. The public API SHOULD NOT be considered stable.
+
+    * Mature modules for which we guarantee a stable public API and telemetry will
+      be versioned with a major version greater than `v0`.
+    * Experimental modules will start their versioning at `v0.0.0` and will
+      increment their minor version when backwards incompatible changes are
+      released and increment their patch version when backwards compatible
+      changes are released.
+    * Stable contrib modules cannot depend on experimental modules from this
+      project.
+    * All stable contrib modules of the same major version with this project
+      will use the same entire version as this project.
+      * Stable modules may be released with an incremented minor or patch
+        version even though that module's code has not been changed. Instead,
+        the only included change will be an update of that module's
+        dependency on this project's stable APIs.
+      * When an experimental module in contrib becomes stable a new stable
+        module version will be released and will include this now stable
+        module. The new stable module version will be an increment of the minor
+        version number and will be applied to all existing stable contrib
+        modules, this project's modules, and the newly stable module being
+        released.
+  * Contrib modules will be kept up to date with this project's releases.
+    * Due to the dependency contrib modules will implicitly have on this
+      project's modules the release of stable contrib modules to match the
+      released version number will be staggered after this project's release.
+      There is no explicit time guarantee for how long after this project's
+      release the contrib release will be. Effort should be made to keep them
+      as close in time as possible.
+    * No additional stable release in this project can be made until the
+      contrib repository has a matching stable release.
+    * No release can be made in the contrib repository after this project's
+      stable release except for a stable release of the contrib repository.
+* GitHub releases will be made for all releases.
+* Go modules will be made available at Go package mirrors.
+
+## Example Versioning Lifecycle
+
+To better understand the implementation of the above policy the following
+example is provided. This project is simplified to include only the following
+modules and their versions:
+
+* `otel`: `v0.14.0`
+* `otel/trace`: `v0.14.0`
+* `otel/metric`: `v0.14.0`
+* `otel/baggage`: `v0.14.0`
+* `otel/sdk/trace`: `v0.14.0`
+* `otel/sdk/metric`: `v0.14.0`
+
+Development has reached a point where the `otel/trace`, `otel/baggage`, and
+`otel/sdk/trace` modules should be considered for a stable release. The
+`otel/metric` and `otel/sdk/metric` modules are still under active
+development, and the `otel` module depends on both `otel/trace` and
+`otel/metric`.
+
+The `otel` package is refactored to remove its dependencies on `otel/metric` so
+it can be released as stable as well. With that done the following release
+candidates are made:
+
+* `otel`: `v1.0.0-RC1`
+* `otel/trace`: `v1.0.0-RC1`
+* `otel/baggage`: `v1.0.0-RC1`
+* `otel/sdk/trace`: `v1.0.0-RC1`
+
+The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`.
+
+A few minor issues are discovered in the `otel/trace` package. These issues are
+resolved with some minor, but backwards incompatible, changes and are released
+as a second release candidate:
+
+* `otel`: `v1.0.0-RC2`
+* `otel/trace`: `v1.0.0-RC2`
+* `otel/baggage`: `v1.0.0-RC2`
+* `otel/sdk/trace`: `v1.0.0-RC2`
+
+Notice that all module version numbers are incremented to adhere to our
+versioning policy.
+
+After these release candidates have been evaluated to satisfaction, they are
+released as version `v1.0.0`.
+
+* `otel`: `v1.0.0`
+* `otel/trace`: `v1.0.0`
+* `otel/baggage`: `v1.0.0`
+* `otel/sdk/trace`: `v1.0.0`
+
+Since both the `go` utility and the Go module system support [the semantic
+versioning definition of
+precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release
+will correctly be interpreted as the successor to the previous release
+candidates.
+
+Active development of this project continues. The `otel/metric` module now has
+backwards incompatible changes to its API that need to be released and the
+`otel/baggage` module has a minor bug fix that needs to be released. The
+following release is made:
+
+* `otel`: `v1.0.1`
+* `otel/trace`: `v1.0.1`
+* `otel/metric`: `v0.15.0`
+* `otel/baggage`: `v1.0.1`
+* `otel/sdk/trace`: `v1.0.1`
+* `otel/sdk/metric`: `v0.15.0`
+
+Notice that, again, all stable module versions are incremented in unison and
+the `otel/sdk/metric` package, which depends on the `otel/metric` package, also
+bumped its version. This bump of the `otel/sdk/metric` package makes sense
+given their coupling, though it is not explicitly required by our versioning
+policy.
+
+As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a
+point where they should be evaluated for stability. The `otel` module is
+reintegrated with the `otel/metric` package and the following release is made:
+
+* `otel`: `v1.1.0-RC1`
+* `otel/trace`: `v1.1.0-RC1`
+* `otel/metric`: `v1.1.0-RC1`
+* `otel/baggage`: `v1.1.0-RC1`
+* `otel/sdk/trace`: `v1.1.0-RC1`
+* `otel/sdk/metric`: `v1.1.0-RC1`
+
+All the modules are evaluated and determined to be viable for a stable
+release. They are then released as version `v1.1.0` (the minor version is
+incremented to indicate the addition of a new signal).
+
+* `otel`: `v1.1.0`
+* `otel/trace`: `v1.1.0`
+* `otel/metric`: `v1.1.0`
+* `otel/baggage`: `v1.1.0`
+* `otel/sdk/trace`: `v1.1.0`
+* `otel/sdk/metric`: `v1.1.0`
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go
similarity index 77%
rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
rename to vendor/go.opentelemetry.io/otel/attribute/doc.go
index c318385cbed0eb46c4b4ce9fb69b4bfdf55f773c..dafe7424dfbb488aafe88743bcc44e64167a3831 100644
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/doc.go
@@ -1,4 +1,4 @@
-// Copyright 2013 Matt T. Proud
+// Copyright The OpenTelemetry Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,5 +12,5 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package pbutil provides record length-delimited Protocol Buffer streaming.
-package pbutil
+// Package attribute provides key and value attributes.
+package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
new file mode 100644
index 0000000000000000000000000000000000000000..fe2bc5766cff99118e6b489cb3493161d6d21cc9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
@@ -0,0 +1,146 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"bytes"
+	"sync"
+	"sync/atomic"
+)
+
+type (
+	// Encoder is a mechanism for serializing an attribute set into a specific
+	// string representation that supports caching, to avoid repeated
+	// serialization. An example could be an exporter encoding the attribute
+	// set into a wire representation.
+	Encoder interface {
+		// Encode returns the serialized encoding of the attribute set using
+		// its Iterator. This result may be cached by an attribute.Set.
+		Encode(iterator Iterator) string
+
+		// ID returns a value that is unique for each class of attribute
+		// encoder. Attribute encoders allocate these using `NewEncoderID`.
+		ID() EncoderID
+	}
+
+	// EncoderID is used to identify distinct Encoder
+	// implementations, for caching encoded results.
+	EncoderID struct {
+		value uint64
+	}
+
+	// defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of
+	// allocations used in encoding attributes. This implementation encodes a
+	// comma-separated list of key=value, with '/'-escaping of '=', ',', and
+	// '\'.
+	defaultAttrEncoder struct {
+		// pool is a pool of attribute set builders. The buffers in this pool
+		// grow to a size that most attribute encodings will not allocate new
+		// memory.
+		pool sync.Pool // *bytes.Buffer
+	}
+)
+
+// escapeChar is used to ensure uniqueness of the attribute encoding where
+// keys or values contain either '=' or ','.  Since there is no parser needed
+// for this encoding and its only requirement is to be unique, this choice is
+// arbitrary.  Users will see these in some exporters (e.g., stdout), so the
+// backslash ('\') is used as a conventional choice.
+const escapeChar = '\\'
+
+var (
+	_ Encoder = &defaultAttrEncoder{}
+
+	// encoderIDCounter is for generating IDs for other attribute encoders.
+	encoderIDCounter uint64
+
+	defaultEncoderOnce     sync.Once
+	defaultEncoderID       = NewEncoderID()
+	defaultEncoderInstance *defaultAttrEncoder
+)
+
+// NewEncoderID returns a unique attribute encoder ID. It should be called
+// once for each type of attribute encoder, preferably in an init() function
+// or a var definition.
+func NewEncoderID() EncoderID {
+	return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)}
+}
+
+// DefaultEncoder returns an attribute encoder that encodes attributes in such
+// a way that each escaped attribute's key is followed by an equal sign and
+// then by an escaped attribute's value. All key-value pairs are separated by
+// a comma.
+//
+// Escaping is done by prepending a backslash before either a backslash, equal
+// sign or a comma.
+func DefaultEncoder() Encoder {
+	defaultEncoderOnce.Do(func() {
+		defaultEncoderInstance = &defaultAttrEncoder{
+			pool: sync.Pool{
+				New: func() interface{} {
+					return &bytes.Buffer{}
+				},
+			},
+		}
+	})
+	return defaultEncoderInstance
+}
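+
+// As a usage sketch (the attribute keys below are arbitrary), the default
+// encoding is a sorted, comma-separated key=value list with escaping:
+//
+//	set := NewSet(String("a", "1"), String("b", "x,y"))
+//	iter := set.Iter()
+//	_ = DefaultEncoder().Encode(iter) // "a=1,b=x\,y"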
+
+// Encode is a part of an implementation of the Encoder interface.
+func (d *defaultAttrEncoder) Encode(iter Iterator) string {
+	buf := d.pool.Get().(*bytes.Buffer)
+	defer d.pool.Put(buf)
+	buf.Reset()
+
+	for iter.Next() {
+		i, keyValue := iter.IndexedAttribute()
+		if i > 0 {
+			_, _ = buf.WriteRune(',')
+		}
+		copyAndEscape(buf, string(keyValue.Key))
+
+		_, _ = buf.WriteRune('=')
+
+		if keyValue.Value.Type() == STRING {
+			copyAndEscape(buf, keyValue.Value.AsString())
+		} else {
+			_, _ = buf.WriteString(keyValue.Value.Emit())
+		}
+	}
+	return buf.String()
+}
+
+// ID is a part of an implementation of the Encoder interface.
+func (*defaultAttrEncoder) ID() EncoderID {
+	return defaultEncoderID
+}
+
+// copyAndEscape escapes `=`, `,` and its own escape character (`\`),
+// making the default encoding unique.
+func copyAndEscape(buf *bytes.Buffer, val string) {
+	for _, ch := range val {
+		switch ch {
+		case '=', ',', escapeChar:
+			_, _ = buf.WriteRune(escapeChar)
+		}
+		_, _ = buf.WriteRune(ch)
+	}
+}
+
+// Valid returns true if this encoder ID was allocated by
+// `NewEncoderID`.  Invalid encoder IDs will not be cached.
+func (id EncoderID) Valid() bool {
+	return id.value != 0
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go
new file mode 100644
index 0000000000000000000000000000000000000000..638c213d59ab483c92cf69f8f43c2afc59b1580d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go
@@ -0,0 +1,60 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+// Filter supports removing certain attributes from attribute sets. When
+// the filter returns true, the attribute will be kept in the filtered
+// attribute set. When the filter returns false, the attribute is excluded
+// from the filtered attribute set, and the attribute instead appears in
+// the removed list of excluded attributes.
+type Filter func(KeyValue) bool
+
+// NewAllowKeysFilter returns a Filter that only allows attributes with one of
+// the provided keys.
+//
+// If keys is empty a deny-all filter is returned.
+func NewAllowKeysFilter(keys ...Key) Filter {
+	if len(keys) == 0 {
+		return func(kv KeyValue) bool { return false }
+	}
+
+	allowed := make(map[Key]struct{})
+	for _, k := range keys {
+		allowed[k] = struct{}{}
+	}
+	return func(kv KeyValue) bool {
+		_, ok := allowed[kv.Key]
+		return ok
+	}
+}
+
+// NewDenyKeysFilter returns a Filter that only allows attributes
+// that do not have one of the provided keys.
+//
+// If keys is empty an allow-all filter is returned.
+func NewDenyKeysFilter(keys ...Key) Filter {
+	if len(keys) == 0 {
+		return func(kv KeyValue) bool { return true }
+	}
+
+	forbid := make(map[Key]struct{})
+	for _, k := range keys {
+		forbid[k] = struct{}{}
+	}
+	return func(kv KeyValue) bool {
+		_, ok := forbid[kv.Key]
+		return !ok
+	}
+}
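+
+// Usage sketch (the key names below are illustrative); filters are typically
+// applied through NewSetWithFiltered or Set.Filter:
+//
+//	kvs := []KeyValue{String("host.name", "a"), String("password", "x")}
+//	kept, removed := NewSetWithFiltered(kvs, NewAllowKeysFilter("host.name"))
+//	_, _ = kept, removed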
diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go
new file mode 100644
index 0000000000000000000000000000000000000000..841b271fb7dd7d39263bb6234cb4235fab954dfc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go
@@ -0,0 +1,161 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+// Iterator allows iterating over the set of attributes in order, sorted by
+// key.
+type Iterator struct {
+	storage *Set
+	idx     int
+}
+
+// MergeIterator supports iterating over two sets of attributes while
+// eliminating duplicate keys from the combined set. The value from the first
+// iterator takes precedence.
+type MergeIterator struct {
+	one     oneIterator
+	two     oneIterator
+	current KeyValue
+}
+
+type oneIterator struct {
+	iter Iterator
+	done bool
+	attr KeyValue
+}
+
+// Next moves the iterator to the next position. Returns false if there are no
+// more attributes.
+func (i *Iterator) Next() bool {
+	i.idx++
+	return i.idx < i.Len()
+}
+
+// Label returns the current KeyValue. It must be called only after Next
+// returns true.
+//
+// Deprecated: Use Attribute instead.
+func (i *Iterator) Label() KeyValue {
+	return i.Attribute()
+}
+
+// Attribute returns the current KeyValue of the Iterator. It must be called
+// only after Next returns true.
+func (i *Iterator) Attribute() KeyValue {
+	kv, _ := i.storage.Get(i.idx)
+	return kv
+}
+
+// IndexedLabel returns the current index and attribute. It must be called
+// only after Next returns true.
+//
+// Deprecated: Use IndexedAttribute instead.
+func (i *Iterator) IndexedLabel() (int, KeyValue) {
+	return i.idx, i.Attribute()
+}
+
+// IndexedAttribute returns the current index and attribute. It must be
+// called only after Next returns true.
+func (i *Iterator) IndexedAttribute() (int, KeyValue) {
+	return i.idx, i.Attribute()
+}
+
+// Len returns the number of attributes in the iterated set.
+func (i *Iterator) Len() int {
+	return i.storage.Len()
+}
+
+// ToSlice is a convenience function that creates a slice of attributes from
+// the passed iterator. The iterator is set up to start from the beginning
+// before creating the slice.
+func (i *Iterator) ToSlice() []KeyValue {
+	l := i.Len()
+	if l == 0 {
+		return nil
+	}
+	i.idx = -1
+	slice := make([]KeyValue, 0, l)
+	for i.Next() {
+		slice = append(slice, i.Attribute())
+	}
+	return slice
+}
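+
+// A typical iteration sketch over a Set:
+//
+//	iter := set.Iter()
+//	for iter.Next() {
+//		kv := iter.Attribute()
+//		_ = kv
+//	}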
+
+// NewMergeIterator returns a MergeIterator for merging two attribute sets.
+// Duplicates are resolved by taking the value from the first set.
+func NewMergeIterator(s1, s2 *Set) MergeIterator {
+	mi := MergeIterator{
+		one: makeOne(s1.Iter()),
+		two: makeOne(s2.Iter()),
+	}
+	return mi
+}
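+
+// Merge sketch: on duplicate keys the value from s1 wins.
+//
+//	mi := NewMergeIterator(&s1, &s2)
+//	for mi.Next() {
+//		_ = mi.Attribute()
+//	}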
+
+func makeOne(iter Iterator) oneIterator {
+	oi := oneIterator{
+		iter: iter,
+	}
+	oi.advance()
+	return oi
+}
+
+func (oi *oneIterator) advance() {
+	if oi.done = !oi.iter.Next(); !oi.done {
+		oi.attr = oi.iter.Attribute()
+	}
+}
+
+// Next returns true if there is another attribute available.
+func (m *MergeIterator) Next() bool {
+	if m.one.done && m.two.done {
+		return false
+	}
+	if m.one.done {
+		m.current = m.two.attr
+		m.two.advance()
+		return true
+	}
+	if m.two.done {
+		m.current = m.one.attr
+		m.one.advance()
+		return true
+	}
+	if m.one.attr.Key == m.two.attr.Key {
+		m.current = m.one.attr // first iterator attribute value wins
+		m.one.advance()
+		m.two.advance()
+		return true
+	}
+	if m.one.attr.Key < m.two.attr.Key {
+		m.current = m.one.attr
+		m.one.advance()
+		return true
+	}
+	m.current = m.two.attr
+	m.two.advance()
+	return true
+}
+
+// Label returns the current value after Next() returns true.
+//
+// Deprecated: Use Attribute instead.
+func (m *MergeIterator) Label() KeyValue {
+	return m.current
+}
+
+// Attribute returns the current value after Next() returns true.
+func (m *MergeIterator) Attribute() KeyValue {
+	return m.current
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go
new file mode 100644
index 0000000000000000000000000000000000000000..0656a04e43bf8d6e9079386a9069dcb72b70013f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/key.go
@@ -0,0 +1,134 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+// Key represents the key part in key-value pairs. It's a string. The
+// allowed character set in the key depends on the use of the key.
+type Key string
+
+// Bool creates a KeyValue instance with a BOOL Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Bool(name, value).
+func (k Key) Bool(v bool) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: BoolValue(v),
+	}
+}
+
+// BoolSlice creates a KeyValue instance with a BOOLSLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- BoolSlice(name, value).
+func (k Key) BoolSlice(v []bool) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: BoolSliceValue(v),
+	}
+}
+
+// Int creates a KeyValue instance with an INT64 Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Int(name, value).
+func (k Key) Int(v int) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: IntValue(v),
+	}
+}
+
+// IntSlice creates a KeyValue instance with an INT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- IntSlice(name, value).
+func (k Key) IntSlice(v []int) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: IntSliceValue(v),
+	}
+}
+
+// Int64 creates a KeyValue instance with an INT64 Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Int64(name, value).
+func (k Key) Int64(v int64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Int64Value(v),
+	}
+}
+
+// Int64Slice creates a KeyValue instance with an INT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Int64Slice(name, value).
+func (k Key) Int64Slice(v []int64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Int64SliceValue(v),
+	}
+}
+
+// Float64 creates a KeyValue instance with a FLOAT64 Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Float64(name, value).
+func (k Key) Float64(v float64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Float64Value(v),
+	}
+}
+
+// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Float64Slice(name, value).
+func (k Key) Float64Slice(v []float64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Float64SliceValue(v),
+	}
+}
+
+// String creates a KeyValue instance with a STRING Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- String(name, value).
+func (k Key) String(v string) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: StringValue(v),
+	}
+}
+
+// StringSlice creates a KeyValue instance with a STRINGSLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- StringSlice(name, value).
+func (k Key) StringSlice(v []string) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: StringSliceValue(v),
+	}
+}
+
+// Defined returns true for non-empty keys.
+func (k Key) Defined() bool {
+	return len(k) != 0
+}
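+
+// Keys are typically declared once and reused (the key name below is
+// illustrative):
+//
+//	var userIDKey = Key("enduser.id")
+//	kv := userIDKey.String("jane")
+//	_ = kv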
diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go
new file mode 100644
index 0000000000000000000000000000000000000000..1ddf3ce05805db7e893db52e48d586e8d00422ce
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go
@@ -0,0 +1,86 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"fmt"
+)
+
+// KeyValue holds a key and value pair.
+type KeyValue struct {
+	Key   Key
+	Value Value
+}
+
+// Valid returns true if kv is a valid OpenTelemetry attribute.
+func (kv KeyValue) Valid() bool {
+	return kv.Key.Defined() && kv.Value.Type() != INVALID
+}
+
+// Bool creates a KeyValue with a BOOL Value type.
+func Bool(k string, v bool) KeyValue {
+	return Key(k).Bool(v)
+}
+
+// BoolSlice creates a KeyValue with a BOOLSLICE Value type.
+func BoolSlice(k string, v []bool) KeyValue {
+	return Key(k).BoolSlice(v)
+}
+
+// Int creates a KeyValue with an INT64 Value type.
+func Int(k string, v int) KeyValue {
+	return Key(k).Int(v)
+}
+
+// IntSlice creates a KeyValue with an INT64SLICE Value type.
+func IntSlice(k string, v []int) KeyValue {
+	return Key(k).IntSlice(v)
+}
+
+// Int64 creates a KeyValue with an INT64 Value type.
+func Int64(k string, v int64) KeyValue {
+	return Key(k).Int64(v)
+}
+
+// Int64Slice creates a KeyValue with an INT64SLICE Value type.
+func Int64Slice(k string, v []int64) KeyValue {
+	return Key(k).Int64Slice(v)
+}
+
+// Float64 creates a KeyValue with a FLOAT64 Value type.
+func Float64(k string, v float64) KeyValue {
+	return Key(k).Float64(v)
+}
+
+// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type.
+func Float64Slice(k string, v []float64) KeyValue {
+	return Key(k).Float64Slice(v)
+}
+
+// String creates a KeyValue with a STRING Value type.
+func String(k, v string) KeyValue {
+	return Key(k).String(v)
+}
+
+// StringSlice creates a KeyValue with a STRINGSLICE Value type.
+func StringSlice(k string, v []string) KeyValue {
+	return Key(k).StringSlice(v)
+}
+
+// Stringer creates a new key-value pair with a passed name and a string
+// value generated by the passed Stringer interface.
+func Stringer(k string, v fmt.Stringer) KeyValue {
+	return Key(k).String(v.String())
+}
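+
+// Construction sketch using the package-level helpers (names illustrative):
+//
+//	attrs := []KeyValue{
+//		String("service.name", "checkout"),
+//		Int("retry.count", 3),
+//	}
+//	_ = attrs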
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
new file mode 100644
index 0000000000000000000000000000000000000000..fb6da51450cc7f8f20e93dc1f3ee311a7331cb18
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -0,0 +1,452 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"encoding/json"
+	"reflect"
+	"sort"
+	"sync"
+)
+
+type (
+	// Set is the representation for a distinct attribute set. It manages an
+	// immutable set of attributes, with an internal cache for storing
+	// attribute encodings.
+	//
+	// This type supports the Equivalent method of comparison using values of
+	// type Distinct.
+	Set struct {
+		equivalent Distinct
+	}
+
+	// Distinct wraps a variable-size array of KeyValue, constructed with keys
+	// in sorted order. This can be used as a map key or for equality checking
+	// between Sets.
+	Distinct struct {
+		iface interface{}
+	}
+
+	// Sortable implements sort.Interface, used for sorting KeyValue. This is
+	// an exported type to support a memory optimization. A pointer to one of
+	// these is needed for the call to sort.Stable(), which the caller may
+	// provide in order to avoid an allocation. See NewSetWithSortable().
+	Sortable []KeyValue
+)
+
+var (
+	// keyValueType is used in computeDistinctReflect.
+	keyValueType = reflect.TypeOf(KeyValue{})
+
+	// emptySet is returned for empty attribute sets.
+	emptySet = &Set{
+		equivalent: Distinct{
+			iface: [0]KeyValue{},
+		},
+	}
+
+	// sortables is a pool of Sortables used to create Sets when a user does
+	// not provide one.
+	sortables = sync.Pool{
+		New: func() interface{} { return new(Sortable) },
+	}
+)
+
+// EmptySet returns a reference to a Set with no elements.
+//
+// This is a convenience provided for optimized calling utility.
+func EmptySet() *Set {
+	return emptySet
+}
+
+// reflectValue abbreviates reflect.ValueOf(d).
+func (d Distinct) reflectValue() reflect.Value {
+	return reflect.ValueOf(d.iface)
+}
+
+// Valid returns true if this value refers to a valid Set.
+func (d Distinct) Valid() bool {
+	return d.iface != nil
+}
+
+// Len returns the number of attributes in this set.
+func (l *Set) Len() int {
+	if l == nil || !l.equivalent.Valid() {
+		return 0
+	}
+	return l.equivalent.reflectValue().Len()
+}
+
+// Get returns the KeyValue at ordered position idx in this set.
+func (l *Set) Get(idx int) (KeyValue, bool) {
+	if l == nil || !l.equivalent.Valid() {
+		return KeyValue{}, false
+	}
+	value := l.equivalent.reflectValue()
+
+	if idx >= 0 && idx < value.Len() {
+		// Note: The Go compiler successfully avoids an allocation for
+		// the interface{} conversion here:
+		return value.Index(idx).Interface().(KeyValue), true
+	}
+
+	return KeyValue{}, false
+}
+
+// Value returns the value of a specified key in this set.
+func (l *Set) Value(k Key) (Value, bool) {
+	if l == nil || !l.equivalent.Valid() {
+		return Value{}, false
+	}
+	rValue := l.equivalent.reflectValue()
+	vlen := rValue.Len()
+
+	idx := sort.Search(vlen, func(idx int) bool {
+		return rValue.Index(idx).Interface().(KeyValue).Key >= k
+	})
+	if idx >= vlen {
+		return Value{}, false
+	}
+	keyValue := rValue.Index(idx).Interface().(KeyValue)
+	if k == keyValue.Key {
+		return keyValue.Value, true
+	}
+	return Value{}, false
+}
+
+// HasValue tests whether a key is defined in this set.
+func (l *Set) HasValue(k Key) bool {
+	if l == nil {
+		return false
+	}
+	_, ok := l.Value(k)
+	return ok
+}
+
+// Iter returns an iterator for visiting the attributes in this set.
+func (l *Set) Iter() Iterator {
+	return Iterator{
+		storage: l,
+		idx:     -1,
+	}
+}
+
+// ToSlice returns the set of attributes belonging to this set, sorted, where
+// keys appear no more than once.
+func (l *Set) ToSlice() []KeyValue {
+	iter := l.Iter()
+	return iter.ToSlice()
+}
+
+// Equivalent returns a value that may be used as a map key. The Distinct type
+// guarantees that the result will equal the equivalent Distinct value of any
+// attribute set with the same elements as this, where sets are made unique by
+// choosing the last value in the input for any given key.
+func (l *Set) Equivalent() Distinct {
+	if l == nil || !l.equivalent.Valid() {
+		return emptySet.equivalent
+	}
+	return l.equivalent
+}
+
+// Equals returns true if the argument set is equivalent to this set.
+func (l *Set) Equals(o *Set) bool {
+	return l.Equivalent() == o.Equivalent()
+}
+
+// Encoded returns the encoded form of this set, according to encoder.
+func (l *Set) Encoded(encoder Encoder) string {
+	if l == nil || encoder == nil {
+		return ""
+	}
+
+	return encoder.Encode(l.Iter())
+}
+
+func empty() Set {
+	return Set{
+		equivalent: emptySet.equivalent,
+	}
+}
+
+// NewSet returns a new Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+//
+// Except for empty sets, this method adds an additional allocation compared
+// with calls that include a Sortable.
+func NewSet(kvs ...KeyValue) Set {
+	// Check for empty set.
+	if len(kvs) == 0 {
+		return empty()
+	}
+	srt := sortables.Get().(*Sortable)
+	s, _ := NewSetWithSortableFiltered(kvs, srt, nil)
+	sortables.Put(srt)
+	return s
+}
+
+// NewSetWithSortable returns a new Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+//
+// This call includes a Sortable option as a memory optimization.
+func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set {
+	// Check for empty set.
+	if len(kvs) == 0 {
+		return empty()
+	}
+	s, _ := NewSetWithSortableFiltered(kvs, tmp, nil)
+	return s
+}
+
+// NewSetWithFiltered returns a new Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+//
+// This call includes a Filter to include/exclude attribute keys from the
+// return value. Excluded keys are returned as a slice of attribute values.
+func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
+	// Check for empty set.
+	if len(kvs) == 0 {
+		return empty(), nil
+	}
+	srt := sortables.Get().(*Sortable)
+	s, filtered := NewSetWithSortableFiltered(kvs, srt, filter)
+	sortables.Put(srt)
+	return s, filtered
+}
+
+// NewSetWithSortableFiltered returns a new Set.
+//
+// Duplicate keys are eliminated by taking the last value.  This
+// re-orders the input slice so that unique last-values are contiguous
+// at the end of the slice.
+//
+// This ensures the following:
+//
+// - Last-value-wins semantics
+// - Caller sees the reordering, but doesn't lose values
+// - Repeated calls preserve last-value-wins semantics.
+//
+// Note that methods are defined on *Set, although this constructor returns a
+// Set value. Callers can avoid memory allocations by:
+//
+// - allocating a Sortable for use as a temporary in this method
+// - allocating a Set for storing the return value of this constructor.
+//
+// The result maintains a cache of encoded attributes, by attribute.EncoderID.
+// This value should not be copied after its first use.
+//
+// The second []KeyValue return value is a list of attributes that were
+// excluded by the Filter (if non-nil).
+func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) {
+	// Check for empty set.
+	if len(kvs) == 0 {
+		return empty(), nil
+	}
+
+	*tmp = kvs
+
+	// Stable sort so the following de-duplication can implement
+	// last-value-wins semantics.
+	sort.Stable(tmp)
+
+	*tmp = nil
+
+	position := len(kvs) - 1
+	offset := position - 1
+
+	// The requirements stated above require that the stable
+	// result be placed in the end of the input slice, while
+	// overwritten values are swapped to the beginning.
+	//
+	// De-duplicate with last-value-wins semantics.  Preserve
+	// duplicate values at the beginning of the input slice.
+	for ; offset >= 0; offset-- {
+		if kvs[offset].Key == kvs[position].Key {
+			continue
+		}
+		position--
+		kvs[offset], kvs[position] = kvs[position], kvs[offset]
+	}
+	kvs = kvs[position:]
+
+	if filter != nil {
+		if div := filteredToFront(kvs, filter); div != 0 {
+			return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div]
+		}
+	}
+	return Set{equivalent: computeDistinct(kvs)}, nil
+}
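+
+// De-duplication sketch: the last value provided for a repeated key wins.
+//
+//	s := NewSet(String("k", "v1"), String("k", "v2"))
+//	v, _ := s.Value("k") // v.AsString() == "v2"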
+
+// filteredToFront filters the slice in place using the keep function. All
+// KeyValues that need to be removed are moved to the front. All KeyValues
+// that need to be kept are moved (in-order) to the back. The index of the
+// first KeyValue to be kept is returned.
+func filteredToFront(slice []KeyValue, keep Filter) int {
+	n := len(slice)
+	j := n
+	for i := n - 1; i >= 0; i-- {
+		if keep(slice[i]) {
+			j--
+			slice[i], slice[j] = slice[j], slice[i]
+		}
+	}
+	return j
+}
+
+// Filter returns a filtered copy of this Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+func (l *Set) Filter(re Filter) (Set, []KeyValue) {
+	if re == nil {
+		return *l, nil
+	}
+
+	// Iterate in reverse to the first attribute that will be filtered out.
+	n := l.Len()
+	first := n - 1
+	for ; first >= 0; first-- {
+		kv, _ := l.Get(first)
+		if !re(kv) {
+			break
+		}
+	}
+
+	// No attributes will be dropped, return the immutable Set l and nil.
+	if first < 0 {
+		return *l, nil
+	}
+
+	// Copy now that we know we need to return a modified set.
+	//
+	// Do not do this in-place on the underlying storage of *Set l. Sets are
+	// immutable and filtering should not change this.
+	slice := l.ToSlice()
+
+	// Don't re-iterate the slice if only slice[0] is filtered.
+	if first == 0 {
+		// It is safe to assume len(slice) >= 1 given we found at least one
+		// attribute above that needs to be filtered out.
+		return Set{equivalent: computeDistinct(slice[1:])}, slice[:1]
+	}
+
+	// Move the filtered slice[first] to the front (preserving order).
+	kv := slice[first]
+	copy(slice[1:first+1], slice[:first])
+	slice[0] = kv
+
+	// Do not re-evaluate re(slice[first+1:]).
+	div := filteredToFront(slice[1:first+1], re) + 1
+	return Set{equivalent: computeDistinct(slice[div:])}, slice[:div]
+}
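+
+// Filter sketch (the key name is illustrative):
+//
+//	kept, dropped := set.Filter(NewDenyKeysFilter("password"))
+//	_, _ = kept, dropped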
+
+// computeDistinct returns a Distinct using either the fixed- or
+// reflect-oriented code path, depending on the size of the input. The input
+// slice is assumed to already be sorted and de-duplicated.
+func computeDistinct(kvs []KeyValue) Distinct {
+	iface := computeDistinctFixed(kvs)
+	if iface == nil {
+		iface = computeDistinctReflect(kvs)
+	}
+	return Distinct{
+		iface: iface,
+	}
+}
+
+// computeDistinctFixed computes a Distinct for small slices. It returns nil
+// if the input is too large for this code path.
+func computeDistinctFixed(kvs []KeyValue) interface{} {
+	switch len(kvs) {
+	case 1:
+		ptr := new([1]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 2:
+		ptr := new([2]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 3:
+		ptr := new([3]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 4:
+		ptr := new([4]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 5:
+		ptr := new([5]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 6:
+		ptr := new([6]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 7:
+		ptr := new([7]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 8:
+		ptr := new([8]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 9:
+		ptr := new([9]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 10:
+		ptr := new([10]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	default:
+		return nil
+	}
+}
+
+// computeDistinctReflect computes a Distinct using reflection, working for
+// any size input.
+func computeDistinctReflect(kvs []KeyValue) interface{} {
+	at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
+	for i, keyValue := range kvs {
+		*(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
+	}
+	return at.Interface()
+}
+
+// MarshalJSON returns the JSON encoding of the Set.
+func (l *Set) MarshalJSON() ([]byte, error) {
+	return json.Marshal(l.equivalent.iface)
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Set.
+func (l Set) MarshalLog() interface{} {
+	kvs := make(map[string]string)
+	for _, kv := range l.ToSlice() {
+		kvs[string(kv.Key)] = kv.Value.Emit()
+	}
+	return kvs
+}
+
+// Len implements sort.Interface.
+func (l *Sortable) Len() int {
+	return len(*l)
+}
+
+// Swap implements sort.Interface.
+func (l *Sortable) Swap(i, j int) {
+	(*l)[i], (*l)[j] = (*l)[j], (*l)[i]
+}
+
+// Less implements sort.Interface.
+func (l *Sortable) Less(i, j int) bool {
+	return (*l)[i].Key < (*l)[j].Key
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go
new file mode 100644
index 0000000000000000000000000000000000000000..e584b24776bc8105fc0babb2c1871a451de47286
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go
@@ -0,0 +1,31 @@
+// Code generated by "stringer -type=Type"; DO NOT EDIT.
+
+package attribute
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[INVALID-0]
+	_ = x[BOOL-1]
+	_ = x[INT64-2]
+	_ = x[FLOAT64-3]
+	_ = x[STRING-4]
+	_ = x[BOOLSLICE-5]
+	_ = x[INT64SLICE-6]
+	_ = x[FLOAT64SLICE-7]
+	_ = x[STRINGSLICE-8]
+}
+
+const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE"
+
+var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71}
+
+func (i Type) String() string {
+	if i < 0 || i >= Type(len(_Type_index)-1) {
+		return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _Type_name[_Type_index[i]:_Type_index[i+1]]
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go
new file mode 100644
index 0000000000000000000000000000000000000000..cb21dd5c0961cd42cc16770246012cea8de2088e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/value.go
@@ -0,0 +1,270 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+
+	"go.opentelemetry.io/otel/internal"
+	"go.opentelemetry.io/otel/internal/attribute"
+)
+
+//go:generate stringer -type=Type
+
+// Type describes the type of the data Value holds.
+type Type int // nolint: revive  // redefines builtin Type.
+
+// Value represents the value part in key-value pairs.
+type Value struct {
+	vtype    Type
+	numeric  uint64
+	stringly string
+	slice    interface{}
+}
+
+const (
+	// INVALID is used for a Value with no value set.
+	INVALID Type = iota
+	// BOOL is a boolean Type Value.
+	BOOL
+	// INT64 is a 64-bit signed integral Type Value.
+	INT64
+	// FLOAT64 is a 64-bit floating point Type Value.
+	FLOAT64
+	// STRING is a string Type Value.
+	STRING
+	// BOOLSLICE is a slice of booleans Type Value.
+	BOOLSLICE
+	// INT64SLICE is a slice of 64-bit signed integral numbers Type Value.
+	INT64SLICE
+	// FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value.
+	FLOAT64SLICE
+	// STRINGSLICE is a slice of strings Type Value.
+	STRINGSLICE
+)
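+
+// Construction and inspection sketch:
+//
+//	v := Int64Value(42)
+//	if v.Type() == INT64 {
+//		_ = v.AsInt64() // 42
+//	}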
+
+// BoolValue creates a BOOL Value.
+func BoolValue(v bool) Value {
+	return Value{
+		vtype:   BOOL,
+		numeric: internal.BoolToRaw(v),
+	}
+}
+
+// BoolSliceValue creates a BOOLSLICE Value.
+func BoolSliceValue(v []bool) Value {
+	return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)}
+}
+
+// IntValue creates an INT64 Value.
+func IntValue(v int) Value {
+	return Int64Value(int64(v))
+}
+
+// IntSliceValue creates an INT64SLICE Value.
+func IntSliceValue(v []int) Value {
+	var int64Val int64
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val)))
+	for i, val := range v {
+		cp.Elem().Index(i).SetInt(int64(val))
+	}
+	return Value{
+		vtype: INT64SLICE,
+		slice: cp.Elem().Interface(),
+	}
+}
+
+// Int64Value creates an INT64 Value.
+func Int64Value(v int64) Value {
+	return Value{
+		vtype:   INT64,
+		numeric: internal.Int64ToRaw(v),
+	}
+}
+
+// Int64SliceValue creates an INT64SLICE Value.
+func Int64SliceValue(v []int64) Value {
+	return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)}
+}
+
+// Float64Value creates a FLOAT64 Value.
+func Float64Value(v float64) Value {
+	return Value{
+		vtype:   FLOAT64,
+		numeric: internal.Float64ToRaw(v),
+	}
+}
+
+// Float64SliceValue creates a FLOAT64SLICE Value.
+func Float64SliceValue(v []float64) Value {
+	return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)}
+}
+
+// StringValue creates a STRING Value.
+func StringValue(v string) Value {
+	return Value{
+		vtype:    STRING,
+		stringly: v,
+	}
+}
+
+// StringSliceValue creates a STRINGSLICE Value.
+func StringSliceValue(v []string) Value {
+	return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)}
+}
+
+// Type returns the type of the Value.
+func (v Value) Type() Type {
+	return v.vtype
+}
+
+// AsBool returns the bool value. Make sure that the Value's type is
+// BOOL.
+func (v Value) AsBool() bool {
+	return internal.RawToBool(v.numeric)
+}
+
+// AsBoolSlice returns the []bool value. Make sure that the Value's type is
+// BOOLSLICE.
+func (v Value) AsBoolSlice() []bool {
+	if v.vtype != BOOLSLICE {
+		return nil
+	}
+	return v.asBoolSlice()
+}
+
+func (v Value) asBoolSlice() []bool {
+	return attribute.AsBoolSlice(v.slice)
+}
+
+// AsInt64 returns the int64 value. Make sure that the Value's type is
+// INT64.
+func (v Value) AsInt64() int64 {
+	return internal.RawToInt64(v.numeric)
+}
+
+// AsInt64Slice returns the []int64 value. Make sure that the Value's type is
+// INT64SLICE.
+func (v Value) AsInt64Slice() []int64 {
+	if v.vtype != INT64SLICE {
+		return nil
+	}
+	return v.asInt64Slice()
+}
+
+func (v Value) asInt64Slice() []int64 {
+	return attribute.AsInt64Slice(v.slice)
+}
+
+// AsFloat64 returns the float64 value. Make sure that the Value's
+// type is FLOAT64.
+func (v Value) AsFloat64() float64 {
+	return internal.RawToFloat64(v.numeric)
+}
+
+// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is
+// FLOAT64SLICE.
+func (v Value) AsFloat64Slice() []float64 {
+	if v.vtype != FLOAT64SLICE {
+		return nil
+	}
+	return v.asFloat64Slice()
+}
+
+func (v Value) asFloat64Slice() []float64 {
+	return attribute.AsFloat64Slice(v.slice)
+}
+
+// AsString returns the string value. Make sure that the Value's type
+// is STRING.
+func (v Value) AsString() string {
+	return v.stringly
+}
+
+// AsStringSlice returns the []string value. Make sure that the Value's type is
+// STRINGSLICE.
+func (v Value) AsStringSlice() []string {
+	if v.vtype != STRINGSLICE {
+		return nil
+	}
+	return v.asStringSlice()
+}
+
+func (v Value) asStringSlice() []string {
+	return attribute.AsStringSlice(v.slice)
+}
+
+type unknownValueType struct{}
+
+// AsInterface returns Value's data as interface{}.
+func (v Value) AsInterface() interface{} {
+	switch v.Type() {
+	case BOOL:
+		return v.AsBool()
+	case BOOLSLICE:
+		return v.asBoolSlice()
+	case INT64:
+		return v.AsInt64()
+	case INT64SLICE:
+		return v.asInt64Slice()
+	case FLOAT64:
+		return v.AsFloat64()
+	case FLOAT64SLICE:
+		return v.asFloat64Slice()
+	case STRING:
+		return v.stringly
+	case STRINGSLICE:
+		return v.asStringSlice()
+	}
+	return unknownValueType{}
+}
+
+// Emit returns a string representation of Value's data.
+func (v Value) Emit() string {
+	switch v.Type() {
+	case BOOLSLICE:
+		return fmt.Sprint(v.asBoolSlice())
+	case BOOL:
+		return strconv.FormatBool(v.AsBool())
+	case INT64SLICE:
+		return fmt.Sprint(v.asInt64Slice())
+	case INT64:
+		return strconv.FormatInt(v.AsInt64(), 10)
+	case FLOAT64SLICE:
+		return fmt.Sprint(v.asFloat64Slice())
+	case FLOAT64:
+		return fmt.Sprint(v.AsFloat64())
+	case STRINGSLICE:
+		return fmt.Sprint(v.asStringSlice())
+	case STRING:
+		return v.stringly
+	default:
+		return "unknown"
+	}
+}
+
+// MarshalJSON returns the JSON encoding of the Value.
+func (v Value) MarshalJSON() ([]byte, error) {
+	var jsonVal struct {
+		Type  string
+		Value interface{}
+	}
+	jsonVal.Type = v.Type().String()
+	jsonVal.Value = v.AsInterface()
+	return json.Marshal(jsonVal)
+}
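+
+// Illustrative usage sketch (not part of the upstream file): constructing and
+// inspecting a Value with the exported API above.
+//
+//	v := Int64SliceValue([]int64{1, 2, 3})
+//	v.Type()         // INT64SLICE
+//	v.AsInt64Slice() // []int64{1, 2, 3}
+//	v.Emit()         // "[1 2 3]"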
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
new file mode 100644
index 0000000000000000000000000000000000000000..7d27cf77d5c9611e1b68e71d2ecc240c384afdbe
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -0,0 +1,744 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package baggage // import "go.opentelemetry.io/otel/baggage"
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"go.opentelemetry.io/otel/internal/baggage"
+)
+
+const (
+	maxMembers               = 180
+	maxBytesPerMembers       = 4096
+	maxBytesPerBaggageString = 8192
+
+	listDelimiter     = ","
+	keyValueDelimiter = "="
+	propertyDelimiter = ";"
+)
+
+var (
+	errInvalidKey      = errors.New("invalid key")
+	errInvalidValue    = errors.New("invalid value")
+	errInvalidProperty = errors.New("invalid baggage list-member property")
+	errInvalidMember   = errors.New("invalid baggage list-member")
+	errMemberNumber    = errors.New("too many list-members in baggage-string")
+	errMemberBytes     = errors.New("list-member too large")
+	errBaggageBytes    = errors.New("baggage-string too large")
+)
+
+// Property is an additional metadata entry for a baggage list-member.
+type Property struct {
+	key, value string
+
+	// hasValue distinguishes a property whose value was explicitly set to the
+	// zero-value (empty string) from a property that has no value at all.
+	hasValue bool
+}
+
+// NewKeyProperty returns a new Property for key.
+//
+// If key is invalid, an error will be returned.
+func NewKeyProperty(key string) (Property, error) {
+	if !validateKey(key) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
+	p := Property{key: key}
+	return p, nil
+}
+
+// NewKeyValueProperty returns a new Property for key with value.
+//
+// The passed key must be compliant with the W3C Baggage specification.
+// The passed value must be percent-encoded as defined in the W3C Baggage specification.
+//
+// Notice: Consider using [NewKeyValuePropertyRaw] instead,
+// which does not require percent-encoding of the value.
+func NewKeyValueProperty(key, value string) (Property, error) {
+	if !validateValue(value) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	decodedValue, err := url.PathUnescape(value)
+	if err != nil {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	return NewKeyValuePropertyRaw(key, decodedValue)
+}
+
+// NewKeyValuePropertyRaw returns a new Property for key with value.
+//
+// The passed key must be compliant with the W3C Baggage specification.
+func NewKeyValuePropertyRaw(key, value string) (Property, error) {
+	if !validateKey(key) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
+	p := Property{
+		key:      key,
+		value:    value,
+		hasValue: true,
+	}
+	return p, nil
+}
+
+func newInvalidProperty() Property {
+	return Property{}
+}
+
+// parseProperty attempts to decode a Property from the passed string. It
+// returns an error if the input is invalid according to the W3C Baggage
+// specification.
+func parseProperty(property string) (Property, error) {
+	if property == "" {
+		return newInvalidProperty(), nil
+	}
+
+	p, ok := parsePropertyInternal(property)
+	if !ok {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
+	}
+
+	return p, nil
+}
+
+// validate ensures p conforms to the W3C Baggage specification, returning an
+// error otherwise.
+func (p Property) validate() error {
+	errFunc := func(err error) error {
+		return fmt.Errorf("invalid property: %w", err)
+	}
+
+	if !validateKey(p.key) {
+		return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
+	}
+	if !p.hasValue && p.value != "" {
+		return errFunc(errors.New("inconsistent value"))
+	}
+	return nil
+}
+
+// Key returns the Property key.
+func (p Property) Key() string {
+	return p.key
+}
+
+// Value returns the Property value. Additionally, a boolean value is returned
+// indicating whether the Property has a value set; this distinguishes a
+// Property with an empty value from one with no value at all.
+func (p Property) Value() (string, bool) {
+	return p.value, p.hasValue
+}
+
+// String encodes Property into a header string compliant with the W3C Baggage
+// specification.
+func (p Property) String() string {
+	if p.hasValue {
+		return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value))
+	}
+	return p.key
+}
+
+type properties []Property
+
+func fromInternalProperties(iProps []baggage.Property) properties {
+	if len(iProps) == 0 {
+		return nil
+	}
+
+	props := make(properties, len(iProps))
+	for i, p := range iProps {
+		props[i] = Property{
+			key:      p.Key,
+			value:    p.Value,
+			hasValue: p.HasValue,
+		}
+	}
+	return props
+}
+
+func (p properties) asInternal() []baggage.Property {
+	if len(p) == 0 {
+		return nil
+	}
+
+	iProps := make([]baggage.Property, len(p))
+	for i, prop := range p {
+		iProps[i] = baggage.Property{
+			Key:      prop.key,
+			Value:    prop.value,
+			HasValue: prop.hasValue,
+		}
+	}
+	return iProps
+}
+
+func (p properties) Copy() properties {
+	if len(p) == 0 {
+		return nil
+	}
+
+	props := make(properties, len(p))
+	copy(props, p)
+	return props
+}
+
+// validate ensures each Property in p conforms to the W3C Baggage
+// specification, returning an error otherwise.
+func (p properties) validate() error {
+	for _, prop := range p {
+		if err := prop.validate(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// String encodes properties into a header string compliant with the W3C Baggage
+// specification.
+func (p properties) String() string {
+	props := make([]string, len(p))
+	for i, prop := range p {
+		props[i] = prop.String()
+	}
+	return strings.Join(props, propertyDelimiter)
+}
+
+// Member is a list-member of a baggage-string as defined by the W3C Baggage
+// specification.
+type Member struct {
+	key, value string
+	properties properties
+
+	// hasData indicates whether the created member contains data or not.
+	// Members that do not contain data are invalid with no other check
+	// required.
+	hasData bool
+}
+
+// NewMember returns a new Member from the passed arguments.
+//
+// The passed key must be compliant with the W3C Baggage specification.
+// The passed value must be percent-encoded as defined in the W3C Baggage specification.
+//
+// Notice: Consider using [NewMemberRaw] instead,
+// which does not require percent-encoding of the value.
+func NewMember(key, value string, props ...Property) (Member, error) {
+	if !validateValue(value) {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	decodedValue, err := url.PathUnescape(value)
+	if err != nil {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	return NewMemberRaw(key, decodedValue, props...)
+}
+
+// NewMemberRaw returns a new Member from the passed arguments.
+//
+// The passed key must be compliant with the W3C Baggage specification.
+func NewMemberRaw(key, value string, props ...Property) (Member, error) {
+	m := Member{
+		key:        key,
+		value:      value,
+		properties: properties(props).Copy(),
+		hasData:    true,
+	}
+	if err := m.validate(); err != nil {
+		return newInvalidMember(), err
+	}
+	return m, nil
+}
+
+func newInvalidMember() Member {
+	return Member{}
+}
+
+// parseMember attempts to decode a Member from the passed string. It returns
+// an error if the input is invalid according to the W3C Baggage
+// specification.
+func parseMember(member string) (Member, error) {
+	if n := len(member); n > maxBytesPerMembers {
+		return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n)
+	}
+
+	var props properties
+	keyValue, properties, found := strings.Cut(member, propertyDelimiter)
+	if found {
+		// Parse the member properties.
+		for _, pStr := range strings.Split(properties, propertyDelimiter) {
+			p, err := parseProperty(pStr)
+			if err != nil {
+				return newInvalidMember(), err
+			}
+			props = append(props, p)
+		}
+	}
+	// Parse the member key/value pair.
+
+	// Take into account that a value can contain equal signs (=).
+	k, v, found := strings.Cut(keyValue, keyValueDelimiter)
+	if !found {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member)
+	}
+	// "Leading and trailing whitespaces are allowed but MUST be trimmed
+	// when converting the header into a data structure."
+	key := strings.TrimSpace(k)
+	if !validateKey(key) {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
+	val := strings.TrimSpace(v)
+	if !validateValue(val) {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v)
+	}
+
+	// Decode a percent-encoded value.
+	value, err := url.PathUnescape(val)
+	if err != nil {
+		return newInvalidMember(), fmt.Errorf("%w: %v", errInvalidValue, err)
+	}
+	return Member{key: key, value: value, properties: props, hasData: true}, nil
+}
+
+// validate ensures m conforms to the W3C Baggage specification, returning an
+// error otherwise. A key must be an ASCII string.
+func (m Member) validate() error {
+	if !m.hasData {
+		return fmt.Errorf("%w: %q", errInvalidMember, m)
+	}
+
+	if !validateKey(m.key) {
+		return fmt.Errorf("%w: %q", errInvalidKey, m.key)
+	}
+	return m.properties.validate()
+}
+
+// Key returns the Member key.
+func (m Member) Key() string { return m.key }
+
+// Value returns the Member value.
+func (m Member) Value() string { return m.value }
+
+// Properties returns a copy of the Member properties.
+func (m Member) Properties() []Property { return m.properties.Copy() }
+
+// String encodes Member into a header string compliant with the W3C Baggage
+// specification.
+func (m Member) String() string {
+	// A key is just an ASCII string. A value is restricted to be
+	// US-ASCII characters excluding CTLs, whitespace,
+	// DQUOTE, comma, semicolon, and backslash.
+	s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, valueEscape(m.value))
+	if len(m.properties) > 0 {
+		s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String())
+	}
+	return s
+}
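+
+// Illustrative sketch (not part of the upstream file): building and encoding
+// a Member with the constructors above.
+//
+//	m, _ := NewMemberRaw("userId", "alice smith")
+//	m.String() // "userId=alice%20smith" (the space is percent-encoded)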
+
+// Baggage is a list of baggage members representing the baggage-string as
+// defined by the W3C Baggage specification.
+type Baggage struct { //nolint:golint
+	list baggage.List
+}
+
+// New returns a new valid Baggage. It returns an error if it results in a
+// Baggage exceeding limits set in the W3C Baggage specification.
+//
+// It expects all the provided members to have already been validated.
+func New(members ...Member) (Baggage, error) {
+	if len(members) == 0 {
+		return Baggage{}, nil
+	}
+
+	b := make(baggage.List)
+	for _, m := range members {
+		if !m.hasData {
+			return Baggage{}, errInvalidMember
+		}
+
+		// OpenTelemetry resolves duplicates by last-one-wins.
+		b[m.key] = baggage.Item{
+			Value:      m.value,
+			Properties: m.properties.asInternal(),
+		}
+	}
+
+	// Check member numbers after deduplication.
+	if len(b) > maxMembers {
+		return Baggage{}, errMemberNumber
+	}
+
+	bag := Baggage{b}
+	if n := len(bag.String()); n > maxBytesPerBaggageString {
+		return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
+	}
+
+	return bag, nil
+}
+
+// Parse attempts to decode a baggage-string from the passed string. It
+// returns an error if the input is invalid according to the W3C Baggage
+// specification.
+//
+// If there are duplicate list-members contained in baggage, the last one
+// defined (reading left-to-right) will be the only one kept. This diverges
+// from the W3C Baggage specification which allows duplicate list-members, but
+// conforms to the OpenTelemetry Baggage specification.
+func Parse(bStr string) (Baggage, error) {
+	if bStr == "" {
+		return Baggage{}, nil
+	}
+
+	if n := len(bStr); n > maxBytesPerBaggageString {
+		return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
+	}
+
+	b := make(baggage.List)
+	for _, memberStr := range strings.Split(bStr, listDelimiter) {
+		m, err := parseMember(memberStr)
+		if err != nil {
+			return Baggage{}, err
+		}
+		// OpenTelemetry resolves duplicates by last-one-wins.
+		b[m.key] = baggage.Item{
+			Value:      m.value,
+			Properties: m.properties.asInternal(),
+		}
+	}
+
+	// OpenTelemetry does not allow for duplicate list-members, but the W3C
+	// specification does. Now that we have deduplicated, ensure the baggage
+	// does not exceed list-member limits.
+	if len(b) > maxMembers {
+		return Baggage{}, errMemberNumber
+	}
+
+	return Baggage{b}, nil
+}
+
+// Member returns the baggage list-member identified by key.
+//
+// If there is no list-member matching the passed key, the returned Member
+// will be a zero-value Member.
+// The returned member is not validated, as we assume the validation happened
+// when it was added to the Baggage.
+func (b Baggage) Member(key string) Member {
+	v, ok := b.list[key]
+	if !ok {
+		// We do not need to distinguish a missing Member from a zero-valued
+		// one included in the Baggage, because a zero-valued Member is
+		// invalid according to the W3C Baggage specification (it has an
+		// empty key).
+		return newInvalidMember()
+	}
+
+	return Member{
+		key:        key,
+		value:      v.Value,
+		properties: fromInternalProperties(v.Properties),
+		hasData:    true,
+	}
+}
+
+// Members returns all the baggage list-members.
+// The order of the returned list-members is not significant.
+//
+// The returned members are not validated, as we assume the validation happened
+// when they were added to the Baggage.
+func (b Baggage) Members() []Member {
+	if len(b.list) == 0 {
+		return nil
+	}
+
+	members := make([]Member, 0, len(b.list))
+	for k, v := range b.list {
+		members = append(members, Member{
+			key:        k,
+			value:      v.Value,
+			properties: fromInternalProperties(v.Properties),
+			hasData:    true,
+		})
+	}
+	return members
+}
+
+// SetMember returns a copy of the Baggage with the member included. If the
+// baggage contains a Member with the same key the existing Member is
+// replaced.
+//
+// If member is invalid according to the W3C Baggage specification, an error
+// is returned with the original Baggage.
+func (b Baggage) SetMember(member Member) (Baggage, error) {
+	if !member.hasData {
+		return b, errInvalidMember
+	}
+
+	n := len(b.list)
+	if _, ok := b.list[member.key]; !ok {
+		n++
+	}
+	list := make(baggage.List, n)
+
+	for k, v := range b.list {
+		// Do not copy if we are just going to overwrite.
+		if k == member.key {
+			continue
+		}
+		list[k] = v
+	}
+
+	list[member.key] = baggage.Item{
+		Value:      member.value,
+		Properties: member.properties.asInternal(),
+	}
+
+	return Baggage{list: list}, nil
+}
+
+// DeleteMember returns a copy of the Baggage with the list-member identified
+// by key removed.
+func (b Baggage) DeleteMember(key string) Baggage {
+	n := len(b.list)
+	if _, ok := b.list[key]; ok {
+		n--
+	}
+	list := make(baggage.List, n)
+
+	for k, v := range b.list {
+		if k == key {
+			continue
+		}
+		list[k] = v
+	}
+
+	return Baggage{list: list}
+}
+
+// Len returns the number of list-members in the Baggage.
+func (b Baggage) Len() int {
+	return len(b.list)
+}
+
+// String encodes Baggage into a header string compliant with the W3C Baggage
+// specification.
+func (b Baggage) String() string {
+	members := make([]string, 0, len(b.list))
+	for k, v := range b.list {
+		members = append(members, Member{
+			key:        k,
+			value:      v.Value,
+			properties: fromInternalProperties(v.Properties),
+		}.String())
+	}
+	return strings.Join(members, listDelimiter)
+}
+
+// parsePropertyInternal attempts to decode a Property from the passed string.
+// It follows the spec at https://www.w3.org/TR/baggage/#definition.
+func parsePropertyInternal(s string) (p Property, ok bool) {
+	// For the entire function we will use "   key    =    value  " as an example.
+	// Attempting to parse the key.
+	// First skip spaces at the beginning "<   >key    =    value  " (they could be empty).
+	index := skipSpace(s, 0)
+
+	// Parse the key: "   <key>    =    value  ".
+	keyStart := index
+	keyEnd := index
+	for _, c := range s[keyStart:] {
+		if !validateKeyChar(c) {
+			break
+		}
+		keyEnd++
+	}
+
+	// If we couldn't find any valid key character,
+	// it means the key is either empty or invalid.
+	if keyStart == keyEnd {
+		return
+	}
+
+	// Skip spaces after the key: "   key<    >=    value  ".
+	index = skipSpace(s, keyEnd)
+
+	if index == len(s) {
+		// A key can have no value, like: "   key    ".
+		ok = true
+		p.key = s[keyStart:keyEnd]
+		return
+	}
+
+	// If we have not reached the end and we can't find the '=' delimiter,
+	// it means the property is invalid.
+	if s[index] != keyValueDelimiter[0] {
+		return
+	}
+
+	// Attempting to parse the value.
+	// Match: "   key    =<    >value  ".
+	index = skipSpace(s, index+1)
+
+	// Match the value string: "   key    =    <value>  ".
+	// A valid property can be: "   key    =".
+	// Therefore, we don't have to check if the value is empty.
+	valueStart := index
+	valueEnd := index
+	for _, c := range s[valueStart:] {
+		if !validateValueChar(c) {
+			break
+		}
+		valueEnd++
+	}
+
+	// Skip all trailing whitespaces: "   key    =    value<  >".
+	index = skipSpace(s, valueEnd)
+
+	// If after looking for the value and skipping whitespaces
+	// we have not reached the end, it means the property is
+	// invalid, something like: "   key    =    value  value1".
+	if index != len(s) {
+		return
+	}
+
+	// Decode a percent-encoded value.
+	value, err := url.PathUnescape(s[valueStart:valueEnd])
+	if err != nil {
+		return
+	}
+
+	ok = true
+	p.key = s[keyStart:keyEnd]
+	p.hasValue = true
+
+	p.value = value
+	return
+}
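+
+// Illustrative walk-through (not part of the upstream file) of the states
+// handled above:
+//
+//	p, ok := parsePropertyInternal("  key  =  %20val  ")
+//	// ok == true, p.key == "key", p.value == " val" (percent-decoded)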
+
+func skipSpace(s string, offset int) int {
+	i := offset
+	for ; i < len(s); i++ {
+		c := s[i]
+		if c != ' ' && c != '\t' {
+			break
+		}
+	}
+	return i
+}
+
+func validateKey(s string) bool {
+	if len(s) == 0 {
+		return false
+	}
+
+	for _, c := range s {
+		if !validateKeyChar(c) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func validateKeyChar(c int32) bool {
+	return (c >= 0x23 && c <= 0x27) ||
+		(c >= 0x30 && c <= 0x39) ||
+		(c >= 0x41 && c <= 0x5a) ||
+		(c >= 0x5e && c <= 0x7a) ||
+		c == 0x21 ||
+		c == 0x2a ||
+		c == 0x2b ||
+		c == 0x2d ||
+		c == 0x2e ||
+		c == 0x7c ||
+		c == 0x7e
+}
+
+func validateValue(s string) bool {
+	for _, c := range s {
+		if !validateValueChar(c) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func validateValueChar(c int32) bool {
+	return c == 0x21 ||
+		(c >= 0x23 && c <= 0x2b) ||
+		(c >= 0x2d && c <= 0x3a) ||
+		(c >= 0x3c && c <= 0x5b) ||
+		(c >= 0x5d && c <= 0x7e)
+}
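+
+// Illustrative checks (not part of the upstream file): the ranges above admit
+// US-ASCII from '!' (0x21) through '~' (0x7e), excluding DQUOTE (0x22), comma
+// (0x2c), semicolon (0x3b), and backslash (0x5c).
+//
+//	validateValueChar('a') // true
+//	validateValueChar('"') // false
+//	validateValueChar(';') // false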
+
+// valueEscape escapes the string so it can be safely placed inside a baggage value,
+// replacing special characters with %XX sequences as needed.
+//
+// The implementation is based on:
+// https://github.com/golang/go/blob/f6509cf5cdbb5787061b784973782933c47f1782/src/net/url/url.go#L285.
+func valueEscape(s string) string {
+	hexCount := 0
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		if shouldEscape(c) {
+			hexCount++
+		}
+	}
+
+	if hexCount == 0 {
+		return s
+	}
+
+	var buf [64]byte
+	var t []byte
+
+	required := len(s) + 2*hexCount
+	if required <= len(buf) {
+		t = buf[:required]
+	} else {
+		t = make([]byte, required)
+	}
+
+	j := 0
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		if shouldEscape(c) {
+			const upperhex = "0123456789ABCDEF"
+			t[j] = '%'
+			t[j+1] = upperhex[c>>4]
+			t[j+2] = upperhex[c&15]
+			j += 3
+		} else {
+			t[j] = c
+			j++
+		}
+	}
+
+	return string(t)
+}
+
+// shouldEscape returns true if the specified byte should be escaped when
+// appearing in a baggage value string.
+func shouldEscape(c byte) bool {
+	if c == '%' {
+		// The percent character must be encoded so that percent-encoding can work.
+		return true
+	}
+	return !validateValueChar(int32(c))
+}
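+
+// Illustrative round-trip sketch (not part of the upstream file), using only
+// the exported API defined above:
+//
+//	b, _ := Parse("userId=alice,isProduction=false;ttl=3")
+//	b.Member("userId").Value() // "alice"
+//	m, _ := NewMemberRaw("userId", "bob")
+//	b, _ = b.SetMember(m)      // last-one-wins replacement
+//	b.Len()                    // still 2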
diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go
new file mode 100644
index 0000000000000000000000000000000000000000..24b34b7564a8327b5de573fe725d01686ebc6530
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/context.go
@@ -0,0 +1,39 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package baggage // import "go.opentelemetry.io/otel/baggage"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/internal/baggage"
+)
+
+// ContextWithBaggage returns a copy of parent with baggage.
+func ContextWithBaggage(parent context.Context, b Baggage) context.Context {
+	// Delegate so any hooks for the OpenTracing bridge are handled.
+	return baggage.ContextWithList(parent, b.list)
+}
+
+// ContextWithoutBaggage returns a copy of parent with no baggage.
+func ContextWithoutBaggage(parent context.Context) context.Context {
+	// Delegate so any hooks for the OpenTracing bridge are handled.
+	return baggage.ContextWithList(parent, nil)
+}
+
+// FromContext returns the baggage contained in ctx.
+func FromContext(ctx context.Context) Baggage {
+	// Delegate so any hooks for the OpenTracing bridge are handled.
+	return Baggage{list: baggage.ListFromContext(ctx)}
+}
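+
+// Illustrative sketch (not part of the upstream file): storing and retrieving
+// baggage through a context.
+//
+//	m, _ := NewMemberRaw("userId", "alice")
+//	b, _ := New(m)
+//	ctx := ContextWithBaggage(context.Background(), b)
+//	FromContext(ctx).Member("userId").Value() // "alice"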
diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..4545100df67902df02eab970ff4686dc099f3703
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package baggage provides functionality for storing and retrieving
+baggage items in Go context. For propagating the baggage, see the
+go.opentelemetry.io/otel/propagation package.
+*/
+package baggage // import "go.opentelemetry.io/otel/baggage"
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
new file mode 100644
index 0000000000000000000000000000000000000000..587ebae4e30e7fb9b789da5eb72f586c427f52c5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -0,0 +1,116 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package codes // import "go.opentelemetry.io/otel/codes"
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+)
+
+const (
+	// Unset is the default status code.
+	Unset Code = 0
+
+	// Error indicates the operation contains an error.
+	//
+	// NOTE: The error code in OTLP is 2.
+	// The value of this enum is only relevant to the internals
+	// of the Go SDK.
+	Error Code = 1
+
+	// Ok indicates the operation has been validated by an application
+	// developer or operator to have completed successfully, or to contain no
+	// error.
+	//
+	// NOTE: The Ok code in OTLP is 1.
+	// The value of this enum is only relevant to the internals
+	// of the Go SDK.
+	Ok Code = 2
+
+	maxCode = 3
+)
+
+// Code is a 32-bit representation of a status state.
+type Code uint32
+
+var codeToStr = map[Code]string{
+	Unset: "Unset",
+	Error: "Error",
+	Ok:    "Ok",
+}
+
+var strToCode = map[string]Code{
+	`"Unset"`: Unset,
+	`"Error"`: Error,
+	`"Ok"`:    Ok,
+}
+
+// String returns the Code as a string.
+func (c Code) String() string {
+	return codeToStr[c]
+}
+
+// UnmarshalJSON unmarshals b into the Code.
+//
+// This is based on the functionality in the gRPC codes package:
+// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244
+func (c *Code) UnmarshalJSON(b []byte) error {
+	// From json.Unmarshaler: By convention, to approximate the behavior of
+	// Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
+	// a no-op.
+	if string(b) == "null" {
+		return nil
+	}
+	if c == nil {
+		return fmt.Errorf("nil receiver passed to UnmarshalJSON")
+	}
+
+	var x interface{}
+	if err := json.Unmarshal(b, &x); err != nil {
+		return err
+	}
+	switch x.(type) {
+	case string:
+		if jc, ok := strToCode[string(b)]; ok {
+			*c = jc
+			return nil
+		}
+		return fmt.Errorf("invalid code: %q", string(b))
+	case float64:
+		if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
+			if ci >= maxCode {
+				return fmt.Errorf("invalid code: %q", ci)
+			}
+
+			*c = Code(ci)
+			return nil
+		}
+		return fmt.Errorf("invalid code: %q", string(b))
+	default:
+		return fmt.Errorf("invalid code: %q", string(b))
+	}
+}
+
+// MarshalJSON returns c as the JSON encoding of c.
+func (c *Code) MarshalJSON() ([]byte, error) {
+	if c == nil {
+		return []byte("null"), nil
+	}
+	str, ok := codeToStr[*c]
+	if !ok {
+		return nil, fmt.Errorf("invalid code: %d", *c)
+	}
+	return []byte(fmt.Sprintf("%q", str)), nil
+}
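+
+// Illustrative sketch (not part of the upstream file): a Code unmarshals from
+// either its string name or its numeric value, and marshals to its name.
+//
+//	var c Code
+//	_ = c.UnmarshalJSON([]byte(`"Error"`)) // c == Error
+//	_ = c.UnmarshalJSON([]byte("2"))       // c == Ok
+//	b, _ := c.MarshalJSON()                // string(b) == `"Ok"`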
diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..4e328fbb4b3ab313c868ba93f8c9618ad40acd57
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/codes/doc.go
@@ -0,0 +1,21 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package codes defines the canonical error codes used by OpenTelemetry.
+
+It conforms to [the OpenTelemetry
+specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/api.md#set-status).
+*/
+package codes // import "go.opentelemetry.io/otel/codes"
diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..36d7c24e88e7bbb9c136ccba5eca2742c1fadc94
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/doc.go
@@ -0,0 +1,34 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package otel provides global access to the OpenTelemetry API. The subpackages of
+the otel package provide an implementation of the OpenTelemetry API.
+
+The provided API is used to instrument code and measure data about that code's
+performance and operation. The measured data, by default, is not processed or
+transmitted anywhere. An implementation of the OpenTelemetry SDK, like the
+default SDK implementation (go.opentelemetry.io/otel/sdk), and associated
+exporters are used to process and transport this data.
+
+To read the getting started guide, see https://opentelemetry.io/docs/languages/go/getting-started/.
+
+To read more about tracing, see go.opentelemetry.io/otel/trace.
+
+To read more about metrics, see go.opentelemetry.io/otel/metric.
+
+To read more about propagation, see go.opentelemetry.io/otel/propagation and
+go.opentelemetry.io/otel/baggage.
+*/
+package otel // import "go.opentelemetry.io/otel"
diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go
new file mode 100644
index 0000000000000000000000000000000000000000..72fad85412beedcc8e707310d83cf225b893de66
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/error_handler.go
@@ -0,0 +1,38 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+// ErrorHandler handles irremediable events.
+type ErrorHandler interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Handle handles any error deemed irremediable by an OpenTelemetry
+	// component.
+	Handle(error)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// ErrorHandlerFunc is a convenience adapter to allow the use of a function
+// as an ErrorHandler.
+type ErrorHandlerFunc func(error)
+
+var _ ErrorHandler = ErrorHandlerFunc(nil)
+
+// Handle handles the irremediable error by calling the ErrorHandlerFunc itself.
+func (f ErrorHandlerFunc) Handle(err error) {
+	f(err)
+}
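+
+// Illustrative sketch (not part of the upstream file; assumes the standard
+// library log and errors packages):
+//
+//	h := ErrorHandlerFunc(func(err error) { log.Print(err) })
+//	h.Handle(errors.New("exporter: dropped data")) // logs the error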
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
new file mode 100644
index 0000000000000000000000000000000000000000..16f9af12b66e8aa1ab5044c6e177909c171b87ae
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
@@ -0,0 +1,211 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+
+import (
+	"context"
+	"time"
+
+	"google.golang.org/genproto/googleapis/rpc/errdetails"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
+	colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
+	metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
+type client struct {
+	metadata      metadata.MD
+	exportTimeout time.Duration
+	requestFunc   retry.RequestFunc
+
+	// ourConn keeps track of where conn was created: true if created here in
+	// NewClient, or false if passed with an option. This is important on
+	// Shutdown as the conn should only be closed if we created it. Otherwise,
+	// it is up to the processes that passed the conn to close it.
+	ourConn bool
+	conn    *grpc.ClientConn
+	msc     colmetricpb.MetricsServiceClient
+}
+
+// newClient creates a new gRPC metric client.
+func newClient(ctx context.Context, cfg oconf.Config) (*client, error) {
+	c := &client{
+		exportTimeout: cfg.Metrics.Timeout,
+		requestFunc:   cfg.RetryConfig.RequestFunc(retryable),
+		conn:          cfg.GRPCConn,
+	}
+
+	if len(cfg.Metrics.Headers) > 0 {
+		c.metadata = metadata.New(cfg.Metrics.Headers)
+	}
+
+	if c.conn == nil {
+		// If the caller did not provide a ClientConn when the client was
+		// created, create one using the configuration they did provide.
+		userAgent := "OTel Go OTLP over gRPC metrics exporter/" + Version()
+		dialOpts := []grpc.DialOption{grpc.WithUserAgent(userAgent)}
+		dialOpts = append(dialOpts, cfg.DialOptions...)
+
+		conn, err := grpc.DialContext(ctx, cfg.Metrics.Endpoint, dialOpts...)
+		if err != nil {
+			return nil, err
+		}
+		// Keep track that we own the lifecycle of this conn and need to close
+		// it on Shutdown.
+		c.ourConn = true
+		c.conn = conn
+	}
+
+	c.msc = colmetricpb.NewMetricsServiceClient(c.conn)
+
+	return c, nil
+}
+
+// Shutdown shuts down the client, freeing all resources.
+//
+// Any active connections to a remote endpoint are closed if they were created
+// by the client. Any gRPC connection passed during creation using
+// WithGRPCConn will not be closed. It is the caller's responsibility to
+// handle cleanup of that resource.
+func (c *client) Shutdown(ctx context.Context) error {
+	// The otlpmetric.Exporter synchronizes access to client methods and
+	// ensures this is called only once. The only thing that needs to be done
+	// here is to release any computational resources the client holds.
+
+	c.metadata = nil
+	c.requestFunc = nil
+	c.msc = nil
+
+	err := ctx.Err()
+	if c.ourConn {
+		closeErr := c.conn.Close()
+		// A context timeout error takes precedence over this error.
+		if err == nil && closeErr != nil {
+			err = closeErr
+		}
+	}
+	c.conn = nil
+	return err
+}
+
+// UploadMetrics sends protoMetrics to connected endpoint.
+//
+// Retryable errors from the server will be handled according to any
+// RetryConfig the client was created with.
+func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error {
+	// The otlpmetric.Exporter synchronizes access to client methods, and
+	// ensures this is not called after the Exporter is shut down. The only
+	// thing to do here is send data.
+
+	select {
+	case <-ctx.Done():
+		// Do not upload if the context is already expired.
+		return ctx.Err()
+	default:
+	}
+
+	ctx, cancel := c.exportContext(ctx)
+	defer cancel()
+
+	return c.requestFunc(ctx, func(iCtx context.Context) error {
+		resp, err := c.msc.Export(iCtx, &colmetricpb.ExportMetricsServiceRequest{
+			ResourceMetrics: []*metricpb.ResourceMetrics{protoMetrics},
+		})
+		if resp != nil && resp.PartialSuccess != nil {
+			msg := resp.PartialSuccess.GetErrorMessage()
+			n := resp.PartialSuccess.GetRejectedDataPoints()
+			if n != 0 || msg != "" {
+				err := internal.MetricPartialSuccessError(n, msg)
+				otel.Handle(err)
+			}
+		}
+		// nil is converted to OK.
+		if status.Code(err) == codes.OK {
+			// Success.
+			return nil
+		}
+		return err
+	})
+}
+
+// exportContext returns a copy of parent with an appropriate deadline and
+// cancellation function based on the client's configured export timeout.
+//
+// It is the caller's responsibility to cancel the returned context once its
+// use is complete, via the parent or directly with the returned CancelFunc, to
+// ensure all resources are correctly released.
+func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {
+	var (
+		ctx    context.Context
+		cancel context.CancelFunc
+	)
+
+	if c.exportTimeout > 0 {
+		ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
+	} else {
+		ctx, cancel = context.WithCancel(parent)
+	}
+
+	if c.metadata.Len() > 0 {
+		ctx = metadata.NewOutgoingContext(ctx, c.metadata)
+	}
+
+	return ctx, cancel
+}
+
+// retryable returns whether err identifies a request that can be retried, and
+// a duration to wait before retrying if an explicit throttle time is included
+// in err.
+func retryable(err error) (bool, time.Duration) {
+	s := status.Convert(err)
+	return retryableGRPCStatus(s)
+}
+
+func retryableGRPCStatus(s *status.Status) (bool, time.Duration) {
+	switch s.Code() {
+	case codes.Canceled,
+		codes.DeadlineExceeded,
+		codes.Aborted,
+		codes.OutOfRange,
+		codes.Unavailable,
+		codes.DataLoss:
+		// Additionally, handle RetryInfo.
+		_, d := throttleDelay(s)
+		return true, d
+	case codes.ResourceExhausted:
+		// Retry only if the server signals that the recovery from resource exhaustion is possible.
+		return throttleDelay(s)
+	}
+
+	// Not a retry-able error.
+	return false, 0
+}
+
+// throttleDelay returns whether the status includes RetryInfo details and, if
+// so, the duration to wait before retrying.
+func throttleDelay(s *status.Status) (bool, time.Duration) {
+	for _, detail := range s.Details() {
+		if t, ok := detail.(*errdetails.RetryInfo); ok {
+			return true, t.RetryDelay.AsDuration()
+		}
+	}
+	return false, 0
+}
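+
+// Illustrative sketch (not part of the upstream file): a ResourceExhausted
+// status is only retried when the server attaches RetryInfo details.
+//
+//	s := status.New(codes.ResourceExhausted, "try later")
+//	retryable(s.Err()) // (false, 0): no RetryInfo, so the request is not retried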
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go
new file mode 100644
index 0000000000000000000000000000000000000000..fbd495e7d7d6e1aca94cc80dd1ddb0a6e402990e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go
@@ -0,0 +1,250 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+
+import (
+	"fmt"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
+	"go.opentelemetry.io/otel/sdk/metric"
+)
+
+// Option applies a configuration option to the Exporter.
+type Option interface {
+	applyGRPCOption(oconf.Config) oconf.Config
+}
+
+func asGRPCOptions(opts []Option) []oconf.GRPCOption {
+	converted := make([]oconf.GRPCOption, len(opts))
+	for i, o := range opts {
+		converted[i] = oconf.NewGRPCOption(o.applyGRPCOption)
+	}
+	return converted
+}
+
+// RetryConfig defines configuration for retrying the export of metric data
+// that failed.
+//
+// This configuration does not define any network retry strategy. That is
+// entirely handled by the gRPC ClientConn.
+type RetryConfig retry.Config
+
+type wrappedOption struct {
+	oconf.GRPCOption
+}
+
+func (w wrappedOption) applyGRPCOption(cfg oconf.Config) oconf.Config {
+	return w.ApplyGRPCOption(cfg)
+}
+
+// WithInsecure disables client transport security for the Exporter's gRPC
+// connection, just like grpc.WithInsecure()
+// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used to determine client security. If the endpoint has a
+// scheme of "http" or "unix" client security will be disabled. If both are
+// set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, client security will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithInsecure() Option {
+	return wrappedOption{oconf.WithInsecure()}
+}
+
+// WithEndpoint sets the target endpoint the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4317" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpoint(endpoint string) Option {
+	return wrappedOption{oconf.WithEndpoint(endpoint)}
+}
+
+// WithReconnectionPeriod sets the minimum amount of time between connection
+// attempts to the target endpoint.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithReconnectionPeriod(rp time.Duration) Option {
+	return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+		cfg.ReconnectionPeriod = rp
+		return cfg
+	})}
+}
+
+func compressorToCompression(compressor string) oconf.Compression {
+	if compressor == "gzip" {
+		return oconf.GzipCompression
+	}
+
+	otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
+	return oconf.NoCompression
+}
+
+// WithCompressor sets the compressor the gRPC client uses.
+// Supported compressor values: "gzip".
+//
+// If the OTEL_EXPORTER_OTLP_COMPRESSION or
+// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION environment variable is set, and
+// this option is not passed, that variable value will be used. That value can
+// be either "none" or "gzip". If both are set,
+// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no compressor will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithCompressor(compressor string) Option {
+	return wrappedOption{oconf.WithCompression(compressorToCompression(compressor))}
+}
+
+// WithHeaders will send the provided headers with each gRPC request.
+//
+// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_METRICS_HEADERS
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as a list of key value pairs.
+// These pairs are expected to be in the W3C Correlation-Context format
+// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If
+// both are set, OTEL_EXPORTER_OTLP_METRICS_HEADERS will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no user headers will be set.
+func WithHeaders(headers map[string]string) Option {
+	return wrappedOption{oconf.WithHeaders(headers)}
+}
+
+// WithTLSCredentials sets the gRPC connection to use creds.
+//
+// If the OTEL_EXPORTER_OTLP_CERTIFICATE or
+// OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE environment variable is set, and
+// this option is not passed, that variable value will be used. The value will
+// be parsed as the filepath of the TLS certificate chain to use. If both are
+// set, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no TLS credentials will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithTLSCredentials(creds credentials.TransportCredentials) Option {
+	return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+		cfg.Metrics.GRPCCredentials = creds
+		return cfg
+	})}
+}
+
+// WithServiceConfig defines the default gRPC service config used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithServiceConfig(serviceConfig string) Option {
+	return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+		cfg.ServiceConfig = serviceConfig
+		return cfg
+	})}
+}
+
+// WithDialOption sets explicit grpc.DialOptions to use when establishing a
+// gRPC connection. The options here are appended to the internal grpc.DialOptions
+// used so they will take precedence over any other internal grpc.DialOptions
+// they might conflict with.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithDialOption(opts ...grpc.DialOption) Option {
+	return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+		cfg.DialOptions = opts
+		return cfg
+	})}
+}
+
+// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
+//
+// This option takes precedence over any other option that relates to
+// establishing or persisting a gRPC connection to a target endpoint. Any
+// other option of those types passed will be ignored.
+//
+// It is the caller's responsibility to close the passed conn. The Exporter
+// Shutdown method will not close this connection.
+func WithGRPCConn(conn *grpc.ClientConn) Option {
+	return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+		cfg.GRPCConn = conn
+		return cfg
+	})}
+}
+
+// WithTimeout sets the max amount of time an Exporter will attempt an export.
+//
+// This takes precedence over any retry settings defined by WithRetry. Once
+// this time limit has been reached the export is abandoned and the metric
+// data is dropped.
+//
+// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_METRICS_TIMEOUT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as an integer representing the
+// timeout in milliseconds. If both are set,
+// OTEL_EXPORTER_OTLP_METRICS_TIMEOUT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, a timeout of 10 seconds will be used.
+func WithTimeout(duration time.Duration) Option {
+	return wrappedOption{oconf.WithTimeout(duration)}
+}
+
+// WithRetry sets the retry policy for transient retryable errors that are
+// returned by the target endpoint.
+//
+// If the target endpoint responds with not only a retryable error, but
+// explicitly returns a backoff time in the response, that time will take
+// precedence over these settings.
+//
+// These settings do not define any network retry strategy. That is entirely
+// handled by the gRPC ClientConn.
+//
+// If unset, the default retry policy will be used. It will retry the export
+// 5 seconds after receiving a retryable error and increase exponentially
+// after each error for no more than a total time of 1 minute.
+func WithRetry(settings RetryConfig) Option {
+	return wrappedOption{oconf.WithRetry(retry.Config(settings))}
+}
+
+// WithTemporalitySelector sets the TemporalitySelector the client will use to
+// determine the Temporality of an instrument based on its kind. If this option
+// is not used, the client will use the DefaultTemporalitySelector from the
+// go.opentelemetry.io/otel/sdk/metric package.
+func WithTemporalitySelector(selector metric.TemporalitySelector) Option {
+	return wrappedOption{oconf.WithTemporalitySelector(selector)}
+}
+
+// WithAggregationSelector sets the AggregationSelector the client will use to
+// determine the aggregation to use for an instrument based on its kind. If
+// this option is not used, the reader will use the DefaultAggregationSelector
+// from the go.opentelemetry.io/otel/sdk/metric package, or the aggregation
+// explicitly passed for a view matching an instrument.
+func WithAggregationSelector(selector metric.AggregationSelector) Option {
+	return wrappedOption{oconf.WithAggregationSelector(selector)}
+}
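The option surface above composes with New from exporter.go. A short usage sketch, under the assumption of a plaintext collector at a hypothetical address; the retry values are arbitrary examples, not recommended defaults:

```go
package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
)

func main() {
	ctx := context.Background()

	// Programmatic options take precedence over the corresponding
	// environment variables; WithGRPCConn would override all of these.
	exp, err := otlpmetricgrpc.New(ctx,
		otlpmetricgrpc.WithEndpoint("collector.example:4317"), // hypothetical target
		otlpmetricgrpc.WithInsecure(),
		otlpmetricgrpc.WithTimeout(5*time.Second),
		otlpmetricgrpc.WithRetry(otlpmetricgrpc.RetryConfig{
			Enabled:         true,
			InitialInterval: time.Second,
			MaxInterval:     10 * time.Second,
			MaxElapsedTime:  time.Minute,
		}),
	)
	if err != nil {
		panic(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}
```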
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..7be572a79d02eb157da4d9461f553c5f98a50873
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go
@@ -0,0 +1,96 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package otlpmetricgrpc provides an OTLP metrics exporter using gRPC.
+By default the telemetry is sent to https://localhost:4317.
+
+Exporter should be created using [New] and used with a [metric.PeriodicReader].
+
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT (default: "https://localhost:4317") -
+target to which the exporter sends telemetry.
+The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md.
+The value must contain a host.
+The value may additionally contain a port, a scheme, and a path.
+The value accepts the "http" and "https" schemes.
+The value should not contain a query string or fragment.
+OTEL_EXPORTER_OTLP_METRICS_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
+The configuration can be overridden by [WithEndpoint], [WithInsecure], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_METRICS_INSECURE (default: "false") -
+setting "true" disables client transport security for the exporter's gRPC connection.
+You can use this only when an endpoint is provided without the http or https scheme.
+A scheme defined via OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+takes precedence over this setting.
+OTEL_EXPORTER_OTLP_METRICS_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE.
+The configuration can be overridden by [WithInsecure], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_HEADERS (default: none) -
+key-value pairs used as gRPC metadata associated with gRPC requests.
+The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_METRICS_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_METRICS_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION (default: none) -
+the gRPC compressor the exporter uses.
+Supported value: "gzip".
+OTEL_EXPORTER_OTLP_METRICS_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompressor], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE (default: none) -
+the filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE (default: none) -
+the filepath to the client certificate/certificate chain paired with the client's private key, to use in mTLS communication, in PEM format.
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY (default: none) -
+the filepath to the client's private key to use in mTLS communication, in PEM format.
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE (default: "cumulative") -
+aggregation temporality to use on the basis of instrument kind. Supported values:
+  - "cumulative" - Cumulative aggregation temporality for all instrument kinds,
+  - "delta" - Delta aggregation temporality for Counter, Asynchronous Counter and Histogram instrument kinds;
+    Cumulative aggregation for UpDownCounter and Asynchronous UpDownCounter instrument kinds,
+  - "lowmemory" - Delta aggregation temporality for Synchronous Counter and Histogram instrument kinds;
+    Cumulative aggregation temporality for Synchronous UpDownCounter, Asynchronous Counter, and Asynchronous UpDownCounter instrument kinds.
+
+The configuration can be overridden by [WithTemporalitySelector] option.
+
+OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION (default: "explicit_bucket_histogram") -
+default aggregation to use for histogram instruments. Supported values:
+  - "explicit_bucket_histogram" - [Explicit Bucket Histogram Aggregation],
+  - "base2_exponential_bucket_histogram" - [Base2 Exponential Bucket Histogram Aggregation].
+
+The configuration can be overridden by [WithAggregationSelector] option.
+
+[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
+[Explicit Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#explicit-bucket-histogram-aggregation
+[Base2 Exponential Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#base2-exponential-bucket-histogram-aggregation
+*/
+package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
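The precedence rules documented above can be made concrete with a small sketch. The endpoints are hypothetical, and the variables would normally come from the deployment environment rather than os.Setenv:

```go
package main

import (
	"context"
	"os"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
)

func main() {
	// Set here only to illustrate precedence; the per-signal METRICS
	// variable wins over the generic one.
	os.Setenv("OTEL_EXPORTER_OTLP_ENDPOINT", "https://collector.example:4317")
	os.Setenv("OTEL_EXPORTER_OTLP_METRICS_ENDPOINT", "https://metrics.example:4317")
	os.Setenv("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE", "delta")

	// No options passed: the exporter is configured entirely from the
	// environment, so it targets metrics.example:4317 with delta temporality.
	exp, err := otlpmetricgrpc.New(context.Background())
	if err != nil {
		panic(err)
	}
	_ = exp
}
```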
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go
new file mode 100644
index 0000000000000000000000000000000000000000..826276ba392249997ca8202342413242df7850fb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go
@@ -0,0 +1,167 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+
+import (
+	"context"
+	"fmt"
+	"sync"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/metric"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+	metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
+// Exporter is an OpenTelemetry metric Exporter using gRPC.
+type Exporter struct {
+	// Ensure synchronous access to the client across all functionality.
+	clientMu sync.Mutex
+	client   interface {
+		UploadMetrics(context.Context, *metricpb.ResourceMetrics) error
+		Shutdown(context.Context) error
+	}
+
+	temporalitySelector metric.TemporalitySelector
+	aggregationSelector metric.AggregationSelector
+
+	shutdownOnce sync.Once
+}
+
+func newExporter(c *client, cfg oconf.Config) (*Exporter, error) {
+	ts := cfg.Metrics.TemporalitySelector
+	if ts == nil {
+		ts = func(metric.InstrumentKind) metricdata.Temporality {
+			return metricdata.CumulativeTemporality
+		}
+	}
+
+	as := cfg.Metrics.AggregationSelector
+	if as == nil {
+		as = metric.DefaultAggregationSelector
+	}
+
+	return &Exporter{
+		client: c,
+
+		temporalitySelector: ts,
+		aggregationSelector: as,
+	}, nil
+}
+
+// Temporality returns the Temporality to use for an instrument kind.
+func (e *Exporter) Temporality(k metric.InstrumentKind) metricdata.Temporality {
+	return e.temporalitySelector(k)
+}
+
+// Aggregation returns the Aggregation to use for an instrument kind.
+func (e *Exporter) Aggregation(k metric.InstrumentKind) metric.Aggregation {
+	return e.aggregationSelector(k)
+}
+
+// Export transforms and transmits metric data to an OTLP receiver.
+//
+// This method returns an error if called after Shutdown.
+// This method returns an error if the method is canceled by the passed context.
+func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+	defer global.Debug("OTLP/gRPC exporter export", "Data", rm)
+
+	otlpRm, err := transform.ResourceMetrics(rm)
+	// Best effort upload of transformable metrics.
+	e.clientMu.Lock()
+	upErr := e.client.UploadMetrics(ctx, otlpRm)
+	e.clientMu.Unlock()
+	if upErr != nil {
+		if err == nil {
+			return fmt.Errorf("failed to upload metrics: %w", upErr)
+		}
+		// Merge the two errors.
+		return fmt.Errorf("failed to upload incomplete metrics (%s): %w", err, upErr)
+	}
+	return err
+}
+
+// ForceFlush flushes any metric data held by an exporter.
+//
+// This method returns an error if called after Shutdown.
+// This method returns an error if the method is canceled by the passed context.
+//
+// This method is safe to call concurrently.
+func (e *Exporter) ForceFlush(ctx context.Context) error {
+	// The exporter and client hold no state, nothing to flush.
+	return ctx.Err()
+}
+
+// Shutdown flushes all metric data held by an exporter and releases any held
+// computational resources.
+//
+// This method returns an error if called after Shutdown.
+// This method returns an error if the method is canceled by the passed context.
+//
+// This method is safe to call concurrently.
+func (e *Exporter) Shutdown(ctx context.Context) error {
+	err := errShutdown
+	e.shutdownOnce.Do(func() {
+		e.clientMu.Lock()
+		client := e.client
+		e.client = shutdownClient{}
+		e.clientMu.Unlock()
+		err = client.Shutdown(ctx)
+	})
+	return err
+}
+
+var errShutdown = fmt.Errorf("gRPC exporter is shutdown")
+
+type shutdownClient struct{}
+
+func (c shutdownClient) err(ctx context.Context) error {
+	if err := ctx.Err(); err != nil {
+		return err
+	}
+	return errShutdown
+}
+
+func (c shutdownClient) UploadMetrics(ctx context.Context, _ *metricpb.ResourceMetrics) error {
+	return c.err(ctx)
+}
+
+func (c shutdownClient) Shutdown(ctx context.Context) error {
+	return c.err(ctx)
+}
+
+// MarshalLog returns logging data about the Exporter.
+func (e *Exporter) MarshalLog() interface{} {
+	return struct{ Type string }{Type: "OTLP/gRPC"}
+}
+
+// New returns an OpenTelemetry metric Exporter. The Exporter can be used with
+// a PeriodicReader to export OpenTelemetry metric data to an OTLP receiving
+// endpoint using gRPC.
+//
+// If an already established gRPC ClientConn is not passed in options using
+// WithGRPCConn, a connection to the OTLP endpoint will be established based
+// on options. If a connection cannot be established in the lifetime of ctx,
+// an error will be returned.
+func New(ctx context.Context, options ...Option) (*Exporter, error) {
+	cfg := oconf.NewGRPCConfig(asGRPCOptions(options)...)
+	c, err := newClient(ctx, cfg)
+	if err != nil {
+		return nil, err
+	}
+	return newExporter(c, cfg)
+}
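As the New documentation says, the Exporter is meant to be driven by a PeriodicReader. A minimal wiring sketch, assuming a collector reachable via the default configuration:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	ctx := context.Background()

	exp, err := otlpmetricgrpc.New(ctx)
	if err != nil {
		panic(err)
	}

	// The PeriodicReader calls Export on an interval; shutting down the
	// provider flushes and shuts down the exporter exactly once, matching
	// the shutdownOnce guard above.
	provider := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exp)),
	)
	defer func() { _ = provider.Shutdown(ctx) }()

	meter := provider.Meter("example")
	counter, _ := meter.Int64Counter("requests")
	counter.Add(ctx, 1)
}
```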
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go
new file mode 100644
index 0000000000000000000000000000000000000000..17951ceb451a1757a273cba8ccb5778e376b0749
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go
@@ -0,0 +1,202 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig"
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+// ConfigFn is the generic function used to set a config.
+type ConfigFn func(*EnvOptionsReader)
+
+// EnvOptionsReader reads the required environment variables.
+type EnvOptionsReader struct {
+	GetEnv    func(string) string
+	ReadFile  func(string) ([]byte, error)
+	Namespace string
+}
+
+// Apply runs every ConfigFn.
+func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
+	for _, o := range opts {
+		o(e)
+	}
+}
+
+// GetEnvValue gets an OTLP environment variable value of the specified key
+// using the GetEnv function.
+// This function prepends the OTLP specified namespace to all key lookups.
+func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
+	v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
+	return v, v != ""
+}
+
+// WithString retrieves the specified config and passes it to ConfigFn as a string.
+func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			fn(v)
+		}
+	}
+}
+
+// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
+func WithBool(n string, fn func(bool)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			b := strings.ToLower(v) == "true"
+			fn(b)
+		}
+	}
+}
+
+// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
+func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			d, err := strconv.Atoi(v)
+			if err != nil {
+				global.Error(err, "parse duration", "input", v)
+				return
+			}
+			fn(time.Duration(d) * time.Millisecond)
+		}
+	}
+}
+
+// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
+func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			fn(stringToHeader(v))
+		}
+	}
+}
+
+// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
+func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			u, err := url.Parse(v)
+			if err != nil {
+				global.Error(err, "parse url", "input", v)
+				return
+			}
+			fn(u)
+		}
+	}
+}
+
+// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and passed to fn.
+func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			b, err := e.ReadFile(v)
+			if err != nil {
+				global.Error(err, "read tls ca cert file", "file", v)
+				return
+			}
+			c, err := createCertPool(b)
+			if err != nil {
+				global.Error(err, "create tls cert pool")
+				return
+			}
+			fn(c)
+		}
+	}
+}
+
+// WithClientCert returns a ConfigFn that reads the environment variables nc and nk as filepaths to a client certificate and key pair. If they exist, they are parsed as a crypto/tls.Certificate and passed to fn.
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		vc, okc := e.GetEnvValue(nc)
+		vk, okk := e.GetEnvValue(nk)
+		if !okc || !okk {
+			return
+		}
+		cert, err := e.ReadFile(vc)
+		if err != nil {
+			global.Error(err, "read tls client cert", "file", vc)
+			return
+		}
+		key, err := e.ReadFile(vk)
+		if err != nil {
+			global.Error(err, "read tls client key", "file", vk)
+			return
+		}
+		crt, err := tls.X509KeyPair(cert, key)
+		if err != nil {
+			global.Error(err, "create tls client key pair")
+			return
+		}
+		fn(crt)
+	}
+}
+
+func keyWithNamespace(ns, key string) string {
+	if ns == "" {
+		return key
+	}
+	return fmt.Sprintf("%s_%s", ns, key)
+}
+
+func stringToHeader(value string) map[string]string {
+	headersPairs := strings.Split(value, ",")
+	headers := make(map[string]string)
+
+	for _, header := range headersPairs {
+		n, v, found := strings.Cut(header, "=")
+		if !found {
+			global.Error(errors.New("missing '='"), "parse headers", "input", header)
+			continue
+		}
+		name, err := url.PathUnescape(n)
+		if err != nil {
+			global.Error(err, "escape header key", "key", n)
+			continue
+		}
+		trimmedName := strings.TrimSpace(name)
+		value, err := url.PathUnescape(v)
+		if err != nil {
+			global.Error(err, "escape header value", "value", v)
+			continue
+		}
+		trimmedValue := strings.TrimSpace(value)
+
+		headers[trimmedName] = trimmedValue
+	}
+
+	return headers
+}
+
+func createCertPool(certBytes []byte) (*x509.CertPool, error) {
+	cp := x509.NewCertPool()
+	if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+		return nil, errors.New("failed to append certificate to the cert pool")
+	}
+	return cp, nil
+}
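The EnvOptionsReader pattern above is easiest to see with a map-backed lookup. A minimal sketch, using a stand-in reader type so nothing touches the process environment; the namespace and variable names mirror the vendored package:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// envReader stands in for EnvOptionsReader with a map-backed GetEnv.
type envReader struct {
	env       map[string]string
	namespace string
}

func (e envReader) getEnvValue(key string) (string, bool) {
	v := e.env[e.namespace+"_"+key]
	return v, v != ""
}

func main() {
	r := envReader{
		namespace: "OTEL_EXPORTER_OTLP",
		env: map[string]string{
			"OTEL_EXPORTER_OTLP_TIMEOUT": "2500",
			"OTEL_EXPORTER_OTLP_HEADERS": "api-key=secret,tenant=dev",
		},
	}

	// WithDuration parses the value as integer milliseconds.
	if v, ok := r.getEnvValue("TIMEOUT"); ok {
		if ms, err := strconv.Atoi(v); err == nil {
			fmt.Println(time.Duration(ms) * time.Millisecond) // 2.5s
		}
	}

	// WithHeaders hands the raw value to stringToHeader, which splits on
	// commas and '=' into key/value pairs.
	if v, ok := r.getEnvValue("HEADERS"); ok {
		fmt.Println(v)
	}
}
```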
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go
new file mode 100644
index 0000000000000000000000000000000000000000..06718efa89869fdea1fd04b08d2f95ab70ba47e1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go
@@ -0,0 +1,42 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig\"}" --out=oconf/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl "--data={}" --out=oconf/envconfig_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry\"}" --out=oconf/options.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig\"}" --out=oconf/options_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl "--data={}" --out=oconf/optiontypes.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl "--data={}" --out=oconf/tls.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={}" --out=otest/client.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal\"}" --out=otest/client_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/collector.go.tmpl "--data={\"oconfImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf\"}" --out=otest/collector.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl "--data={}" --out=transform/attribute.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl "--data={}" --out=transform/attribute_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error.go.tmpl "--data={}" --out=transform/error.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl "--data={}" --out=transform/error_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl "--data={}" --out=transform/metricdata.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl "--data={}" --out=transform/metricdata_test.go
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go
new file mode 100644
index 0000000000000000000000000000000000000000..ae100513bad3fb6455b1a656d35c14d506f9cf4a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go
@@ -0,0 +1,221 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"net/url"
+	"os"
+	"path"
+	"strings"
+	"time"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig"
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/metric"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// DefaultEnvOptionsReader is the default environment variables reader.
+var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
+	GetEnv:    os.Getenv,
+	ReadFile:  os.ReadFile,
+	Namespace: "OTEL_EXPORTER_OTLP",
+}
+
+// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
+func ApplyGRPCEnvConfigs(cfg Config) Config {
+	opts := getOptionsFromEnv()
+	for _, opt := range opts {
+		cfg = opt.ApplyGRPCOption(cfg)
+	}
+	return cfg
+}
+
+// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
+func ApplyHTTPEnvConfigs(cfg Config) Config {
+	opts := getOptionsFromEnv()
+	for _, opt := range opts {
+		cfg = opt.ApplyHTTPOption(cfg)
+	}
+	return cfg
+}
+
+func getOptionsFromEnv() []GenericOption {
+	opts := []GenericOption{}
+
+	tlsConf := &tls.Config{}
+	DefaultEnvOptionsReader.Apply(
+		envconfig.WithURL("ENDPOINT", func(u *url.URL) {
+			opts = append(opts, withEndpointScheme(u))
+			opts = append(opts, newSplitOption(func(cfg Config) Config {
+				cfg.Metrics.Endpoint = u.Host
+				// For OTLP/HTTP endpoint URLs without a per-signal
+				// configuration, the passed endpoint is used as a base URL
+				// and the signals are sent to these paths relative to that.
+				cfg.Metrics.URLPath = path.Join(u.Path, DefaultMetricsPath)
+				return cfg
+			}, withEndpointForGRPC(u)))
+		}),
+		envconfig.WithURL("METRICS_ENDPOINT", func(u *url.URL) {
+			opts = append(opts, withEndpointScheme(u))
+			opts = append(opts, newSplitOption(func(cfg Config) Config {
+				cfg.Metrics.Endpoint = u.Host
+				// For endpoint URLs for OTLP/HTTP per-signal variables, the
+				// URL MUST be used as-is without any modification. The only
+				// exception is that if a URL contains no path part, the root
+				// path / MUST be used.
+				path := u.Path
+				if path == "" {
+					path = "/"
+				}
+				cfg.Metrics.URLPath = path
+				return cfg
+			}, withEndpointForGRPC(u)))
+		}),
+		envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+		envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+		envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+		envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+		envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+		envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+		withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
+		envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+		envconfig.WithHeaders("METRICS_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+		WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+		WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+		envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+		envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+		withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }),
+		withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }),
+	)
+
+	return opts
+}
+
+func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
+	return func(cfg Config) Config {
+		// For OTLP/gRPC endpoints, this is the target to which the
+		// exporter is going to send telemetry.
+		cfg.Metrics.Endpoint = path.Join(u.Host, u.Path)
+		return cfg
+	}
+}
+
+// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
+func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
+	return func(e *envconfig.EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			cp := NoCompression
+			if v == "gzip" {
+				cp = GzipCompression
+			}
+
+			fn(cp)
+		}
+	}
+}
+
+func withEndpointScheme(u *url.URL) GenericOption {
+	switch strings.ToLower(u.Scheme) {
+	case "http", "unix":
+		return WithInsecure()
+	default:
+		return WithSecure()
+	}
+}
+
+// revive:disable-next-line:flag-parameter
+func withInsecure(b bool) GenericOption {
+	if b {
+		return WithInsecure()
+	}
+	return WithSecure()
+}
+
+func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
+	return func(e *envconfig.EnvOptionsReader) {
+		if c.RootCAs != nil || len(c.Certificates) > 0 {
+			fn(c)
+		}
+	}
+}
+
+func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) func(e *envconfig.EnvOptionsReader) {
+	return func(e *envconfig.EnvOptionsReader) {
+		if s, ok := e.GetEnvValue(n); ok {
+			switch strings.ToLower(s) {
+			case "cumulative":
+				fn(cumulativeTemporality)
+			case "delta":
+				fn(deltaTemporality)
+			case "lowmemory":
+				fn(lowMemory)
+			default:
+				global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s)
+			}
+		}
+	}
+}
+
+func cumulativeTemporality(metric.InstrumentKind) metricdata.Temporality {
+	return metricdata.CumulativeTemporality
+}
+
+func deltaTemporality(ik metric.InstrumentKind) metricdata.Temporality {
+	switch ik {
+	case metric.InstrumentKindCounter, metric.InstrumentKindHistogram, metric.InstrumentKindObservableCounter:
+		return metricdata.DeltaTemporality
+	default:
+		return metricdata.CumulativeTemporality
+	}
+}
+
+func lowMemory(ik metric.InstrumentKind) metricdata.Temporality {
+	switch ik {
+	case metric.InstrumentKindCounter, metric.InstrumentKindHistogram:
+		return metricdata.DeltaTemporality
+	default:
+		return metricdata.CumulativeTemporality
+	}
+}
+
+func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e *envconfig.EnvOptionsReader) {
+	return func(e *envconfig.EnvOptionsReader) {
+		if s, ok := e.GetEnvValue(n); ok {
+			switch strings.ToLower(s) {
+			case "explicit_bucket_histogram":
+				fn(metric.DefaultAggregationSelector)
+			case "base2_exponential_bucket_histogram":
+				fn(func(kind metric.InstrumentKind) metric.Aggregation {
+					if kind == metric.InstrumentKindHistogram {
+						return metric.AggregationBase2ExponentialHistogram{
+							MaxSize:  160,
+							MaxScale: 20,
+							NoMinMax: false,
+						}
+					}
+					return metric.DefaultAggregationSelector(kind)
+				})
+			default:
+				global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s)
+			}
+		}
+	}
+}
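The temporality selectors here are unexported, but the same mappings can be reproduced with the public SDK API and passed through WithTemporalitySelector. A sketch of the "lowmemory" mapping, mirroring the switch above:

```go
package main

import (
	"fmt"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

func main() {
	// Public-API equivalent of the unexported lowMemory selector: delta for
	// synchronous Counter and Histogram, cumulative for everything else.
	lowMemory := func(ik sdkmetric.InstrumentKind) metricdata.Temporality {
		switch ik {
		case sdkmetric.InstrumentKindCounter, sdkmetric.InstrumentKindHistogram:
			return metricdata.DeltaTemporality
		default:
			return metricdata.CumulativeTemporality
		}
	}

	fmt.Println(lowMemory(sdkmetric.InstrumentKindCounter))           // delta
	fmt.Println(lowMemory(sdkmetric.InstrumentKindObservableCounter)) // cumulative
}
```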
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go
new file mode 100644
index 0000000000000000000000000000000000000000..a85f2712224332ef0396f895b3bada662c7215ed
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go
@@ -0,0 +1,353 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+
+import (
+	"crypto/tls"
+	"fmt"
+	"path"
+	"strings"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/backoff"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/credentials/insecure"
+	"google.golang.org/grpc/encoding/gzip"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
+	"go.opentelemetry.io/otel/sdk/metric"
+)
+
+const (
+	// DefaultMaxAttempts describes how many times the driver
+	// should retry the sending of the payload in case of a
+	// retryable error.
+	DefaultMaxAttempts int = 5
+	// DefaultMetricsPath is a default URL path for the endpoint that
+	// receives metrics.
+	DefaultMetricsPath string = "/v1/metrics"
+	// DefaultBackoff is a default base backoff time used in the
+	// exponential backoff strategy.
+	DefaultBackoff time.Duration = 300 * time.Millisecond
+	// DefaultTimeout is a default max waiting time for the backend to process
+	// each span or metrics batch.
+	DefaultTimeout time.Duration = 10 * time.Second
+)
+
+type (
+	SignalConfig struct {
+		Endpoint    string
+		Insecure    bool
+		TLSCfg      *tls.Config
+		Headers     map[string]string
+		Compression Compression
+		Timeout     time.Duration
+		URLPath     string
+
+		// gRPC configurations
+		GRPCCredentials credentials.TransportCredentials
+
+		TemporalitySelector metric.TemporalitySelector
+		AggregationSelector metric.AggregationSelector
+	}
+
+	Config struct {
+		// Signal specific configurations
+		Metrics SignalConfig
+
+		RetryConfig retry.Config
+
+		// gRPC configurations
+		ReconnectionPeriod time.Duration
+		ServiceConfig      string
+		DialOptions        []grpc.DialOption
+		GRPCConn           *grpc.ClientConn
+	}
+)
+
+// NewHTTPConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default HTTP config values.
+func NewHTTPConfig(opts ...HTTPOption) Config {
+	cfg := Config{
+		Metrics: SignalConfig{
+			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
+			URLPath:     DefaultMetricsPath,
+			Compression: NoCompression,
+			Timeout:     DefaultTimeout,
+
+			TemporalitySelector: metric.DefaultTemporalitySelector,
+			AggregationSelector: metric.DefaultAggregationSelector,
+		},
+		RetryConfig: retry.DefaultConfig,
+	}
+	cfg = ApplyHTTPEnvConfigs(cfg)
+	for _, opt := range opts {
+		cfg = opt.ApplyHTTPOption(cfg)
+	}
+	cfg.Metrics.URLPath = cleanPath(cfg.Metrics.URLPath, DefaultMetricsPath)
+	return cfg
+}
+
+// cleanPath returns a path with all spaces trimmed and all redundancies
+// removed. If urlPath is empty or cleaning it results in an empty string,
+// defaultPath is returned instead.
+func cleanPath(urlPath string, defaultPath string) string {
+	tmp := path.Clean(strings.TrimSpace(urlPath))
+	if tmp == "." {
+		return defaultPath
+	}
+	if !path.IsAbs(tmp) {
+		tmp = fmt.Sprintf("/%s", tmp)
+	}
+	return tmp
+}
+
+// NewGRPCConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default gRPC config values.
+func NewGRPCConfig(opts ...GRPCOption) Config {
+	cfg := Config{
+		Metrics: SignalConfig{
+			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
+			URLPath:     DefaultMetricsPath,
+			Compression: NoCompression,
+			Timeout:     DefaultTimeout,
+
+			TemporalitySelector: metric.DefaultTemporalitySelector,
+			AggregationSelector: metric.DefaultAggregationSelector,
+		},
+		RetryConfig: retry.DefaultConfig,
+	}
+	cfg = ApplyGRPCEnvConfigs(cfg)
+	for _, opt := range opts {
+		cfg = opt.ApplyGRPCOption(cfg)
+	}
+
+	if cfg.ServiceConfig != "" {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
+	}
+	// Prioritize GRPCCredentials over Insecure (passing both is an error).
+	if cfg.Metrics.GRPCCredentials != nil {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials))
+	} else if cfg.Metrics.Insecure {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	} else {
+		// Default to using the host's root CA.
+		creds := credentials.NewTLS(nil)
+		cfg.Metrics.GRPCCredentials = creds
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
+	}
+	if cfg.Metrics.Compression == GzipCompression {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
+	}
+	if cfg.ReconnectionPeriod != 0 {
+		p := grpc.ConnectParams{
+			Backoff:           backoff.DefaultConfig,
+			MinConnectTimeout: cfg.ReconnectionPeriod,
+		}
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
+	}
+
+	return cfg
+}
+
+type (
+	// GenericOption applies an option to the HTTP or gRPC driver.
+	GenericOption interface {
+		ApplyHTTPOption(Config) Config
+		ApplyGRPCOption(Config) Config
+
+		// A private method to prevent users implementing the
+		// interface and so future additions to it will not
+		// violate compatibility.
+		private()
+	}
+
+	// HTTPOption applies an option to the HTTP driver.
+	HTTPOption interface {
+		ApplyHTTPOption(Config) Config
+
+		// A private method to prevent users implementing the
+		// interface and so future additions to it will not
+		// violate compatibility.
+		private()
+	}
+
+	// GRPCOption applies an option to the gRPC driver.
+	GRPCOption interface {
+		ApplyGRPCOption(Config) Config
+
+		// A private method to prevent users implementing the
+		// interface and so future additions to it will not
+		// violate compatibility.
+		private()
+	}
+)
+
+// genericOption is an option that applies the same logic
+// for both gRPC and HTTP.
+type genericOption struct {
+	fn func(Config) Config
+}
+
+func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
+	return g.fn(cfg)
+}
+
+func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
+	return g.fn(cfg)
+}
+
+func (genericOption) private() {}
+
+func newGenericOption(fn func(cfg Config) Config) GenericOption {
+	return &genericOption{fn: fn}
+}
+
+// splitOption is an option that applies different logic
+// for gRPC and HTTP.
+type splitOption struct {
+	httpFn func(Config) Config
+	grpcFn func(Config) Config
+}
+
+func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
+	return g.grpcFn(cfg)
+}
+
+func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
+	return g.httpFn(cfg)
+}
+
+func (splitOption) private() {}
+
+func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
+	return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
+}
+
+// httpOption is an option that is only applied to the HTTP driver.
+type httpOption struct {
+	fn func(Config) Config
+}
+
+func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
+	return h.fn(cfg)
+}
+
+func (httpOption) private() {}
+
+func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
+	return &httpOption{fn: fn}
+}
+
+// grpcOption is an option that is only applied to the gRPC driver.
+type grpcOption struct {
+	fn func(Config) Config
+}
+
+func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
+	return h.fn(cfg)
+}
+
+func (grpcOption) private() {}
+
+func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
+	return &grpcOption{fn: fn}
+}
+
+// Generic Options
+
+func WithEndpoint(endpoint string) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Metrics.Endpoint = endpoint
+		return cfg
+	})
+}
+
+func WithCompression(compression Compression) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Metrics.Compression = compression
+		return cfg
+	})
+}
+
+func WithURLPath(urlPath string) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Metrics.URLPath = urlPath
+		return cfg
+	})
+}
+
+func WithRetry(rc retry.Config) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.RetryConfig = rc
+		return cfg
+	})
+}
+
+func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
+	return newSplitOption(func(cfg Config) Config {
+		cfg.Metrics.TLSCfg = tlsCfg.Clone()
+		return cfg
+	}, func(cfg Config) Config {
+		cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg)
+		return cfg
+	})
+}
+
+func WithInsecure() GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Metrics.Insecure = true
+		return cfg
+	})
+}
+
+func WithSecure() GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Metrics.Insecure = false
+		return cfg
+	})
+}
+
+func WithHeaders(headers map[string]string) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Metrics.Headers = headers
+		return cfg
+	})
+}
+
+func WithTimeout(duration time.Duration) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Metrics.Timeout = duration
+		return cfg
+	})
+}
+
+func WithTemporalitySelector(selector metric.TemporalitySelector) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Metrics.TemporalitySelector = selector
+		return cfg
+	})
+}
+
+func WithAggregationSelector(selector metric.AggregationSelector) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Metrics.AggregationSelector = selector
+		return cfg
+	})
+}
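The GenericOption/HTTPOption/GRPCOption split above is a functional-options pattern with an unexported method guarding the interfaces against outside implementations. A self-contained sketch of that shape, with illustrative local types rather than the vendored oconf ones:

```go
package main

import "fmt"

type config struct{ endpoint string }

// option mirrors GenericOption: one option, two transport-specific
// application paths, and a private() method so only this package can
// implement it.
type option interface {
	applyHTTP(config) config
	applyGRPC(config) config
	private()
}

type genericOpt struct{ fn func(config) config }

func (g genericOpt) applyHTTP(c config) config { return g.fn(c) }
func (g genericOpt) applyGRPC(c config) config { return g.fn(c) }
func (genericOpt) private()                    {}

func withEndpoint(e string) option {
	return genericOpt{fn: func(c config) config {
		c.endpoint = e
		return c
	}}
}

func main() {
	var cfg config
	for _, o := range []option{withEndpoint("collector:4317")} {
		cfg = o.applyGRPC(cfg) // same option would also work via applyHTTP
	}
	fmt.Println(cfg.endpoint)
}
```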
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go
new file mode 100644
index 0000000000000000000000000000000000000000..8a3c8422e1bc1ad901fb77ed2e9c78af3c96a258
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go
@@ -0,0 +1,58 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+
+import "time"
+
+const (
+	// DefaultCollectorGRPCPort is the default gRPC port of the collector.
+	DefaultCollectorGRPCPort uint16 = 4317
+	// DefaultCollectorHTTPPort is the default HTTP port of the collector.
+	DefaultCollectorHTTPPort uint16 = 4318
+	// DefaultCollectorHost is the host address the Exporter will attempt
+	// to connect to if no collector address is provided.
+	DefaultCollectorHost string = "localhost"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression int
+
+const (
+	// NoCompression tells the driver to send payloads without
+	// compression.
+	NoCompression Compression = iota
+	// GzipCompression tells the driver to send payloads after
+	// compressing them with gzip.
+	GzipCompression
+)
+
+// RetrySettings defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type RetrySettings struct {
+	// Enabled indicates whether or not to retry sending batches in case of export failure.
+	Enabled bool
+	// InitialInterval is the time to wait after the first failure before retrying.
+	InitialInterval time.Duration
+	// MaxInterval is the upper bound on backoff interval. Once this value is reached the delay between
+	// consecutive retries will always be `MaxInterval`.
+	MaxInterval time.Duration
+	// MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch.
+	// Once this value is reached, the data is discarded.
+	MaxElapsedTime time.Duration
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go
new file mode 100644
index 0000000000000000000000000000000000000000..2e36e0b6f253d527eb53e48c9366bd06a6e07b2f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go
@@ -0,0 +1,49 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"os"
+)
+
+// ReadTLSConfigFromFile reads a PEM certificate file and creates
+// a tls.Config that will use this certificate to verify a server certificate.
+func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
+	b, err := os.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return CreateTLSConfig(b)
+}
+
+// CreateTLSConfig creates a tls.Config from raw certificate bytes
+// to verify a server certificate.
+func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
+	cp := x509.NewCertPool()
+	if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+		return nil, errors.New("failed to append certificate to the cert pool")
+	}
+
+	return &tls.Config{
+		RootCAs: cp,
+	}, nil
+}
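These helpers are internal; from user code the same outcome is reached through the public option surface by building gRPC transport credentials and passing them to WithTLSCredentials. A sketch, with a hypothetical CA bundle path:

```go
package main

import (
	"context"

	"google.golang.org/grpc/credentials"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
)

func main() {
	// Load a PEM CA bundle for server verification; the path is made up.
	creds, err := credentials.NewClientTLSFromFile("/etc/otel/ca.pem", "")
	if err != nil {
		panic(err)
	}

	exp, err := otlpmetricgrpc.New(context.Background(),
		otlpmetricgrpc.WithTLSCredentials(creds),
	)
	if err != nil {
		panic(err)
	}
	_ = exp
}
```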
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go
new file mode 100644
index 0000000000000000000000000000000000000000..f4d48198251d055fd22ea2978d305fc869a07dc4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go
@@ -0,0 +1,67 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/partialsuccess.go
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
+
+import "fmt"
+
+// PartialSuccess represents the underlying error for all handling of
+// OTLP partial success messages. Use `errors.Is(err,
+// PartialSuccess{})` to test whether an error passed to the OTel
+// error handler belongs to this category.
+type PartialSuccess struct {
+	ErrorMessage  string
+	RejectedItems int64
+	RejectedKind  string
+}
+
+var _ error = PartialSuccess{}
+
+// Error implements the error interface.
+func (ps PartialSuccess) Error() string {
+	msg := ps.ErrorMessage
+	if msg == "" {
+		msg = "empty message"
+	}
+	return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
+}
+
+// Is supports the errors.Is() interface.
+func (ps PartialSuccess) Is(err error) bool {
+	_, ok := err.(PartialSuccess)
+	return ok
+}
+
+// TracePartialSuccessError returns an error describing a partial success
+// response for the trace signal.
+func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
+	return PartialSuccess{
+		ErrorMessage:  errorMessage,
+		RejectedItems: itemsRejected,
+		RejectedKind:  "spans",
+	}
+}
+
+// MetricPartialSuccessError returns an error describing a partial success
+// response for the metric signal.
+func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
+	return PartialSuccess{
+		ErrorMessage:  errorMessage,
+		RejectedItems: itemsRejected,
+		RejectedKind:  "metric data points",
+	}
+}
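
The internal package cannot be imported directly, so here is a self-contained sketch of the errors.Is pattern described above, with a mirrored partialSuccess type standing in for the real one:

package main

import (
	"errors"
	"fmt"
)

// partialSuccess mirrors the vendored PartialSuccess for illustration.
type partialSuccess struct{ msg string }

func (p partialSuccess) Error() string { return p.msg }

// Is matches any partialSuccess value, regardless of its fields.
func (p partialSuccess) Is(err error) bool {
	_, ok := err.(partialSuccess)
	return ok
}

func main() {
	wrapped := fmt.Errorf("export: %w", partialSuccess{msg: "2 metric data points rejected"})
	// The zero value works as the errors.Is target because Is ignores fields.
	fmt.Println(errors.Is(wrapped, partialSuccess{})) // true
}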
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go
new file mode 100644
index 0000000000000000000000000000000000000000..689779c3604392ef770c1268b7e8445c0750e9a5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go
@@ -0,0 +1,156 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/retry/retry.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package retry provides request retry functionality that can perform
+// configurable exponential backoff for transient errors and honor any
+// explicit throttle responses received.
+package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/cenkalti/backoff/v4"
+)
+
+// DefaultConfig holds the recommended defaults to use.
+var DefaultConfig = Config{
+	Enabled:         true,
+	InitialInterval: 5 * time.Second,
+	MaxInterval:     30 * time.Second,
+	MaxElapsedTime:  time.Minute,
+}
+
+// Config defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type Config struct {
+	// Enabled indicates whether to retry sending batches in case of
+	// export failure.
+	Enabled bool
+	// InitialInterval is the time to wait after the first failure before
+	// retrying.
+	InitialInterval time.Duration
+	// MaxInterval is the upper bound on backoff interval. Once this value is
+	// reached the delay between consecutive retries will always be
+	// `MaxInterval`.
+	MaxInterval time.Duration
+	// MaxElapsedTime is the maximum amount of time (including retries) spent
+	// trying to send a request/batch.  Once this value is reached, the data
+	// is discarded.
+	MaxElapsedTime time.Duration
+}
+
+// RequestFunc wraps a request with retry logic.
+type RequestFunc func(context.Context, func(context.Context) error) error
+
+// EvaluateFunc returns whether an error is retryable and whether an explicit
+// throttle duration included in the error should be honored.
+//
+// The function must return true if the error argument is retry-able,
+// otherwise it must return false for the first return parameter.
+//
+// The function must return a non-zero time.Duration if the error contains
+// explicit throttle duration that should be honored, otherwise it must return
+// a zero valued time.Duration.
+type EvaluateFunc func(error) (bool, time.Duration)
+
+// RequestFunc returns a RequestFunc using the evaluate function to determine
+// whether requests can be retried, based on the exponential backoff
+// configuration of c.
+func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
+	if !c.Enabled {
+		return func(ctx context.Context, fn func(context.Context) error) error {
+			return fn(ctx)
+		}
+	}
+
+	return func(ctx context.Context, fn func(context.Context) error) error {
+		// Do not use NewExponentialBackOff since it calls Reset and the code here
+		// must call Reset after changing the InitialInterval (this saves an
+		// unnecessary call to Now).
+		b := &backoff.ExponentialBackOff{
+			InitialInterval:     c.InitialInterval,
+			RandomizationFactor: backoff.DefaultRandomizationFactor,
+			Multiplier:          backoff.DefaultMultiplier,
+			MaxInterval:         c.MaxInterval,
+			MaxElapsedTime:      c.MaxElapsedTime,
+			Stop:                backoff.Stop,
+			Clock:               backoff.SystemClock,
+		}
+		b.Reset()
+
+		for {
+			err := fn(ctx)
+			if err == nil {
+				return nil
+			}
+
+			retryable, throttle := evaluate(err)
+			if !retryable {
+				return err
+			}
+
+			bOff := b.NextBackOff()
+			if bOff == backoff.Stop {
+				return fmt.Errorf("max retry time elapsed: %w", err)
+			}
+
+			// Wait for the greater of the backoff or throttle delay.
+			var delay time.Duration
+			if bOff > throttle {
+				delay = bOff
+			} else {
+				elapsed := b.GetElapsedTime()
+				if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
+					return fmt.Errorf("max retry time would elapse: %w", err)
+				}
+				delay = throttle
+			}
+
+			if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
+				return fmt.Errorf("%w: %s", ctxErr, err)
+			}
+		}
+	}
+}
+
+// Allow override for testing.
+var waitFunc = wait
+
+// wait takes the caller's context, and the amount of time to wait.  It will
+// return nil if the timer fires before or at the same time as the context's
+// deadline.  This indicates that the call can be retried.
+func wait(ctx context.Context, delay time.Duration) error {
+	timer := time.NewTimer(delay)
+	defer timer.Stop()
+
+	select {
+	case <-ctx.Done():
+		// Handle the case where the timer and context deadline end
+		// simultaneously by prioritizing the timer expiration nil value
+		// response.
+		select {
+		case <-timer.C:
+		default:
+			return ctx.Err()
+		}
+	case <-timer.C:
+	}
+
+	return nil
+}
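
A self-contained sketch of the RequestFunc/EvaluateFunc contract above; a fixed doubling delay stands in for the backoff package, and all durations are illustrative:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

type evaluateFunc func(error) (bool, time.Duration)

// withRetry runs fn, asks evaluate whether each failure is retryable, and
// waits for the greater of the backoff or an explicit server throttle.
func withRetry(ctx context.Context, evaluate evaluateFunc, fn func(context.Context) error) error {
	delay := 500 * time.Millisecond // stand-in for exponential backoff
	for {
		err := fn(ctx)
		if err == nil {
			return nil
		}
		retryable, throttle := evaluate(err)
		if !retryable {
			return err
		}
		if throttle > delay {
			delay = throttle // honor an explicit throttle delay
		}
		select {
		case <-ctx.Done():
			return fmt.Errorf("%w: %s", ctx.Err(), err)
		case <-time.After(delay):
		}
		delay *= 2
	}
}

var errTransient = errors.New("transient")

func main() {
	attempts := 0
	err := withRetry(context.Background(),
		func(err error) (bool, time.Duration) { return errors.Is(err, errTransient), 0 },
		func(context.Context) error {
			attempts++
			if attempts < 3 {
				return errTransient
			}
			return nil
		})
	fmt.Println(attempts, err) // 3 <nil>
}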
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go
new file mode 100644
index 0000000000000000000000000000000000000000..e80798eebde4817b64d0a62fd30fb1610bd2e007
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go
@@ -0,0 +1,155 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
+
+import (
+	"go.opentelemetry.io/otel/attribute"
+	cpb "go.opentelemetry.io/proto/otlp/common/v1"
+)
+
+// AttrIter transforms an attribute iterator into OTLP key-values.
+func AttrIter(iter attribute.Iterator) []*cpb.KeyValue {
+	l := iter.Len()
+	if l == 0 {
+		return nil
+	}
+
+	out := make([]*cpb.KeyValue, 0, l)
+	for iter.Next() {
+		out = append(out, KeyValue(iter.Attribute()))
+	}
+	return out
+}
+
+// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
+func KeyValues(attrs []attribute.KeyValue) []*cpb.KeyValue {
+	if len(attrs) == 0 {
+		return nil
+	}
+
+	out := make([]*cpb.KeyValue, 0, len(attrs))
+	for _, kv := range attrs {
+		out = append(out, KeyValue(kv))
+	}
+	return out
+}
+
+// KeyValue transforms an attribute KeyValue into an OTLP key-value.
+func KeyValue(kv attribute.KeyValue) *cpb.KeyValue {
+	return &cpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
+}
+
+// Value transforms an attribute Value into an OTLP AnyValue.
+func Value(v attribute.Value) *cpb.AnyValue {
+	av := new(cpb.AnyValue)
+	switch v.Type() {
+	case attribute.BOOL:
+		av.Value = &cpb.AnyValue_BoolValue{
+			BoolValue: v.AsBool(),
+		}
+	case attribute.BOOLSLICE:
+		av.Value = &cpb.AnyValue_ArrayValue{
+			ArrayValue: &cpb.ArrayValue{
+				Values: boolSliceValues(v.AsBoolSlice()),
+			},
+		}
+	case attribute.INT64:
+		av.Value = &cpb.AnyValue_IntValue{
+			IntValue: v.AsInt64(),
+		}
+	case attribute.INT64SLICE:
+		av.Value = &cpb.AnyValue_ArrayValue{
+			ArrayValue: &cpb.ArrayValue{
+				Values: int64SliceValues(v.AsInt64Slice()),
+			},
+		}
+	case attribute.FLOAT64:
+		av.Value = &cpb.AnyValue_DoubleValue{
+			DoubleValue: v.AsFloat64(),
+		}
+	case attribute.FLOAT64SLICE:
+		av.Value = &cpb.AnyValue_ArrayValue{
+			ArrayValue: &cpb.ArrayValue{
+				Values: float64SliceValues(v.AsFloat64Slice()),
+			},
+		}
+	case attribute.STRING:
+		av.Value = &cpb.AnyValue_StringValue{
+			StringValue: v.AsString(),
+		}
+	case attribute.STRINGSLICE:
+		av.Value = &cpb.AnyValue_ArrayValue{
+			ArrayValue: &cpb.ArrayValue{
+				Values: stringSliceValues(v.AsStringSlice()),
+			},
+		}
+	default:
+		av.Value = &cpb.AnyValue_StringValue{
+			StringValue: "INVALID",
+		}
+	}
+	return av
+}
+
+func boolSliceValues(vals []bool) []*cpb.AnyValue {
+	converted := make([]*cpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &cpb.AnyValue{
+			Value: &cpb.AnyValue_BoolValue{
+				BoolValue: v,
+			},
+		}
+	}
+	return converted
+}
+
+func int64SliceValues(vals []int64) []*cpb.AnyValue {
+	converted := make([]*cpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &cpb.AnyValue{
+			Value: &cpb.AnyValue_IntValue{
+				IntValue: v,
+			},
+		}
+	}
+	return converted
+}
+
+func float64SliceValues(vals []float64) []*cpb.AnyValue {
+	converted := make([]*cpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &cpb.AnyValue{
+			Value: &cpb.AnyValue_DoubleValue{
+				DoubleValue: v,
+			},
+		}
+	}
+	return converted
+}
+
+func stringSliceValues(vals []string) []*cpb.AnyValue {
+	converted := make([]*cpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &cpb.AnyValue{
+			Value: &cpb.AnyValue_StringValue{
+				StringValue: v,
+			},
+		}
+	}
+	return converted
+}
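
For illustration, a runnable sketch that mirrors the scalar cases of the Value transform against the public attribute package (a subset of the switch above):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	cpb "go.opentelemetry.io/proto/otlp/common/v1"
)

// toAnyValue mirrors the vendored Value transform for scalar types only.
func toAnyValue(v attribute.Value) *cpb.AnyValue {
	av := new(cpb.AnyValue)
	switch v.Type() {
	case attribute.BOOL:
		av.Value = &cpb.AnyValue_BoolValue{BoolValue: v.AsBool()}
	case attribute.INT64:
		av.Value = &cpb.AnyValue_IntValue{IntValue: v.AsInt64()}
	case attribute.FLOAT64:
		av.Value = &cpb.AnyValue_DoubleValue{DoubleValue: v.AsFloat64()}
	case attribute.STRING:
		av.Value = &cpb.AnyValue_StringValue{StringValue: v.AsString()}
	default:
		av.Value = &cpb.AnyValue_StringValue{StringValue: "INVALID"}
	}
	return av
}

func main() {
	kv := attribute.Int("http.status_code", 200)
	fmt.Println(kv.Key, toAnyValue(kv.Value)) // int_value:200
}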
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go
new file mode 100644
index 0000000000000000000000000000000000000000..d5d2fdcb7ea70bf629642372a87afd250c2d15dd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go
@@ -0,0 +1,114 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
+var (
+	errUnknownAggregation = errors.New("unknown aggregation")
+	errUnknownTemporality = errors.New("unknown temporality")
+)
+
+type errMetric struct {
+	m   *mpb.Metric
+	err error
+}
+
+func (e errMetric) Unwrap() error {
+	return e.err
+}
+
+func (e errMetric) Error() string {
+	format := "invalid metric (name: %q, description: %q, unit: %q): %s"
+	return fmt.Sprintf(format, e.m.Name, e.m.Description, e.m.Unit, e.err)
+}
+
+func (e errMetric) Is(target error) bool {
+	return errors.Is(e.err, target)
+}
+
+// multiErr is used by the data-type transform functions to wrap multiple
+// errors into a single return value. The error message will show all errors
+// as a list and scope them by the datatype name that is returning them.
+type multiErr struct {
+	datatype string
+	errs     []error
+}
+
+// errOrNil returns nil if e contains no errors, otherwise it returns e.
+func (e *multiErr) errOrNil() error {
+	if len(e.errs) == 0 {
+		return nil
+	}
+	return e
+}
+
+// append adds err to e. If err is a multiErr, its errs are flattened into e.
+func (e *multiErr) append(err error) {
+	// Do not use errors.As here, this should only be flattened one layer. If
+	// there is a *multiErr several steps down the chain, all the errors above
+	// it will be discarded if errors.As is used instead.
+	switch other := err.(type) {
+	case *multiErr:
+		// Flatten err errors into e.
+		e.errs = append(e.errs, other.errs...)
+	default:
+		e.errs = append(e.errs, err)
+	}
+}
+
+func (e *multiErr) Error() string {
+	es := make([]string, len(e.errs))
+	for i, err := range e.errs {
+		es[i] = fmt.Sprintf("* %s", err)
+	}
+
+	format := "%d errors occurred transforming %s:\n\t%s"
+	return fmt.Sprintf(format, len(es), e.datatype, strings.Join(es, "\n\t"))
+}
+
+func (e *multiErr) Unwrap() error {
+	switch len(e.errs) {
+	case 0:
+		return nil
+	case 1:
+		return e.errs[0]
+	}
+
+	// Return a multiErr without the leading error.
+	cp := &multiErr{
+		datatype: e.datatype,
+		errs:     make([]error, len(e.errs)-1),
+	}
+	copy(cp.errs, e.errs[1:])
+	return cp
+}
+
+func (e *multiErr) Is(target error) bool {
+	if len(e.errs) == 0 {
+		return false
+	}
+	// Check if the first error is target.
+	return errors.Is(e.errs[0], target)
+}
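
A compact runnable mirror of this multi-error pattern, showing how one-layer flattening in append and front-peeling in Unwrap let errors.Is reach every wrapped error:

package main

import (
	"errors"
	"fmt"
)

type multiErr struct{ errs []error }

func (e *multiErr) Error() string { return fmt.Sprintf("%d errors", len(e.errs)) }

// append flattens a nested *multiErr one layer, as the vendored code does.
func (e *multiErr) append(err error) {
	if other, ok := err.(*multiErr); ok {
		e.errs = append(e.errs, other.errs...)
		return
	}
	e.errs = append(e.errs, err)
}

// Unwrap peels the leading error off the front so errors.Is can walk on.
func (e *multiErr) Unwrap() error {
	switch len(e.errs) {
	case 0:
		return nil
	case 1:
		return e.errs[0]
	}
	return &multiErr{errs: e.errs[1:]}
}

func (e *multiErr) Is(target error) bool {
	return len(e.errs) > 0 && errors.Is(e.errs[0], target)
}

func main() {
	sentinel := errors.New("unknown temporality")
	me := &multiErr{}
	me.append(errors.New("bad metric"))
	me.append(sentinel)
	fmt.Println(errors.Is(me, sentinel)) // true: Unwrap walks to the sentinel
}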
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go
new file mode 100644
index 0000000000000000000000000000000000000000..00d5c74ad906e78af94644fde5848afd18db31c7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go
@@ -0,0 +1,292 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package transform provides transformation functionality from the
+// sdk/metric/metricdata data-types into OTLP data-types.
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
+
+import (
+	"fmt"
+	"time"
+
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+	cpb "go.opentelemetry.io/proto/otlp/common/v1"
+	mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+	rpb "go.opentelemetry.io/proto/otlp/resource/v1"
+)
+
+// ResourceMetrics returns an OTLP ResourceMetrics generated from rm. If rm
+// contains invalid ScopeMetrics, an error will be returned along with an OTLP
+// ResourceMetrics that contains partial OTLP ScopeMetrics.
+func ResourceMetrics(rm *metricdata.ResourceMetrics) (*mpb.ResourceMetrics, error) {
+	sms, err := ScopeMetrics(rm.ScopeMetrics)
+	return &mpb.ResourceMetrics{
+		Resource: &rpb.Resource{
+			Attributes: AttrIter(rm.Resource.Iter()),
+		},
+		ScopeMetrics: sms,
+		SchemaUrl:    rm.Resource.SchemaURL(),
+	}, err
+}
+
+// ScopeMetrics returns a slice of OTLP ScopeMetrics generated from sms. If
+// sms contains invalid metric values, an error will be returned along with a
+// slice that contains partial OTLP ScopeMetrics.
+func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) {
+	errs := &multiErr{datatype: "ScopeMetrics"}
+	out := make([]*mpb.ScopeMetrics, 0, len(sms))
+	for _, sm := range sms {
+		ms, err := Metrics(sm.Metrics)
+		if err != nil {
+			errs.append(err)
+		}
+
+		out = append(out, &mpb.ScopeMetrics{
+			Scope: &cpb.InstrumentationScope{
+				Name:    sm.Scope.Name,
+				Version: sm.Scope.Version,
+			},
+			Metrics:   ms,
+			SchemaUrl: sm.Scope.SchemaURL,
+		})
+	}
+	return out, errs.errOrNil()
+}
+
+// Metrics returns a slice of OTLP Metric generated from ms. If ms contains
+// invalid metric values, an error will be returned along with a slice that
+// contains partial OTLP Metrics.
+func Metrics(ms []metricdata.Metrics) ([]*mpb.Metric, error) {
+	errs := &multiErr{datatype: "Metrics"}
+	out := make([]*mpb.Metric, 0, len(ms))
+	for _, m := range ms {
+		o, err := metric(m)
+		if err != nil {
+			// Do not include invalid data. Drop the metric, report the error.
+			errs.append(errMetric{m: o, err: err})
+			continue
+		}
+		out = append(out, o)
+	}
+	return out, errs.errOrNil()
+}
+
+func metric(m metricdata.Metrics) (*mpb.Metric, error) {
+	var err error
+	out := &mpb.Metric{
+		Name:        m.Name,
+		Description: m.Description,
+		Unit:        string(m.Unit),
+	}
+	switch a := m.Data.(type) {
+	case metricdata.Gauge[int64]:
+		out.Data = Gauge[int64](a)
+	case metricdata.Gauge[float64]:
+		out.Data = Gauge[float64](a)
+	case metricdata.Sum[int64]:
+		out.Data, err = Sum[int64](a)
+	case metricdata.Sum[float64]:
+		out.Data, err = Sum[float64](a)
+	case metricdata.Histogram[int64]:
+		out.Data, err = Histogram(a)
+	case metricdata.Histogram[float64]:
+		out.Data, err = Histogram(a)
+	case metricdata.ExponentialHistogram[int64]:
+		out.Data, err = ExponentialHistogram(a)
+	case metricdata.ExponentialHistogram[float64]:
+		out.Data, err = ExponentialHistogram(a)
+	default:
+		return out, fmt.Errorf("%w: %T", errUnknownAggregation, a)
+	}
+	return out, err
+}
+
+// Gauge returns an OTLP Metric_Gauge generated from g.
+func Gauge[N int64 | float64](g metricdata.Gauge[N]) *mpb.Metric_Gauge {
+	return &mpb.Metric_Gauge{
+		Gauge: &mpb.Gauge{
+			DataPoints: DataPoints(g.DataPoints),
+		},
+	}
+}
+
+// Sum returns an OTLP Metric_Sum generated from s. An error is returned
+// if the temporality of s is unknown.
+func Sum[N int64 | float64](s metricdata.Sum[N]) (*mpb.Metric_Sum, error) {
+	t, err := Temporality(s.Temporality)
+	if err != nil {
+		return nil, err
+	}
+	return &mpb.Metric_Sum{
+		Sum: &mpb.Sum{
+			AggregationTemporality: t,
+			IsMonotonic:            s.IsMonotonic,
+			DataPoints:             DataPoints(s.DataPoints),
+		},
+	}, nil
+}
+
+// DataPoints returns a slice of OTLP NumberDataPoint generated from dPts.
+func DataPoints[N int64 | float64](dPts []metricdata.DataPoint[N]) []*mpb.NumberDataPoint {
+	out := make([]*mpb.NumberDataPoint, 0, len(dPts))
+	for _, dPt := range dPts {
+		ndp := &mpb.NumberDataPoint{
+			Attributes:        AttrIter(dPt.Attributes.Iter()),
+			StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+			TimeUnixNano:      timeUnixNano(dPt.Time),
+		}
+		switch v := any(dPt.Value).(type) {
+		case int64:
+			ndp.Value = &mpb.NumberDataPoint_AsInt{
+				AsInt: v,
+			}
+		case float64:
+			ndp.Value = &mpb.NumberDataPoint_AsDouble{
+				AsDouble: v,
+			}
+		}
+		out = append(out, ndp)
+	}
+	return out
+}
+
+// Histogram returns an OTLP Metric_Histogram generated from h. An error is
+// returned if the temporality of h is unknown.
+func Histogram[N int64 | float64](h metricdata.Histogram[N]) (*mpb.Metric_Histogram, error) {
+	t, err := Temporality(h.Temporality)
+	if err != nil {
+		return nil, err
+	}
+	return &mpb.Metric_Histogram{
+		Histogram: &mpb.Histogram{
+			AggregationTemporality: t,
+			DataPoints:             HistogramDataPoints(h.DataPoints),
+		},
+	}, nil
+}
+
+// HistogramDataPoints returns a slice of OTLP HistogramDataPoint generated
+// from dPts.
+func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint[N]) []*mpb.HistogramDataPoint {
+	out := make([]*mpb.HistogramDataPoint, 0, len(dPts))
+	for _, dPt := range dPts {
+		sum := float64(dPt.Sum)
+		hdp := &mpb.HistogramDataPoint{
+			Attributes:        AttrIter(dPt.Attributes.Iter()),
+			StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+			TimeUnixNano:      timeUnixNano(dPt.Time),
+			Count:             dPt.Count,
+			Sum:               &sum,
+			BucketCounts:      dPt.BucketCounts,
+			ExplicitBounds:    dPt.Bounds,
+		}
+		if v, ok := dPt.Min.Value(); ok {
+			vF64 := float64(v)
+			hdp.Min = &vF64
+		}
+		if v, ok := dPt.Max.Value(); ok {
+			vF64 := float64(v)
+			hdp.Max = &vF64
+		}
+		out = append(out, hdp)
+	}
+	return out
+}
+
+// ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is
+// returned if the temporality of h is unknown.
+func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) {
+	t, err := Temporality(h.Temporality)
+	if err != nil {
+		return nil, err
+	}
+	return &mpb.Metric_ExponentialHistogram{
+		ExponentialHistogram: &mpb.ExponentialHistogram{
+			AggregationTemporality: t,
+			DataPoints:             ExponentialHistogramDataPoints(h.DataPoints),
+		},
+	}, nil
+}
+
+// ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated
+// from dPts.
+func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint {
+	out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts))
+	for _, dPt := range dPts {
+		sum := float64(dPt.Sum)
+		ehdp := &mpb.ExponentialHistogramDataPoint{
+			Attributes:        AttrIter(dPt.Attributes.Iter()),
+			StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+			TimeUnixNano:      timeUnixNano(dPt.Time),
+			Count:             dPt.Count,
+			Sum:               &sum,
+			Scale:             dPt.Scale,
+			ZeroCount:         dPt.ZeroCount,
+
+			Positive: ExponentialHistogramDataPointBuckets(dPt.PositiveBucket),
+			Negative: ExponentialHistogramDataPointBuckets(dPt.NegativeBucket),
+		}
+		if v, ok := dPt.Min.Value(); ok {
+			vF64 := float64(v)
+			ehdp.Min = &vF64
+		}
+		if v, ok := dPt.Max.Value(); ok {
+			vF64 := float64(v)
+			ehdp.Max = &vF64
+		}
+		out = append(out, ehdp)
+	}
+	return out
+}
+
+// ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated
+// from bucket.
+func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets {
+	return &mpb.ExponentialHistogramDataPoint_Buckets{
+		Offset:       bucket.Offset,
+		BucketCounts: bucket.Counts,
+	}
+}
+
+// Temporality returns an OTLP AggregationTemporality generated from t. If t
+// is unknown, an error is returned along with the invalid
+// AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED.
+func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) {
+	switch t {
+	case metricdata.DeltaTemporality:
+		return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, nil
+	case metricdata.CumulativeTemporality:
+		return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, nil
+	default:
+		err := fmt.Errorf("%w: %s", errUnknownTemporality, t)
+		return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED, err
+	}
+}
+
+// timeUnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC as uint64.
+// The result is undefined if the Unix time
+// in nanoseconds cannot be represented by an int64
+// (a date before the year 1678 or after 2262).
+// timeUnixNano on the zero Time returns 0.
+// The result does not depend on the location associated with t.
+func timeUnixNano(t time.Time) uint64 {
+	if t.IsZero() {
+		return 0
+	}
+	return uint64(t.UnixNano())
+}
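
A small runnable check of the timeUnixNano convention above; the 2024-01-01 timestamp is arbitrary:

package main

import (
	"fmt"
	"time"
)

// timeUnixNano maps the zero Time to 0 and any other time to its Unix
// nanosecond value as uint64, matching the vendored helper.
func timeUnixNano(t time.Time) uint64 {
	if t.IsZero() {
		return 0
	}
	return uint64(t.UnixNano())
}

func main() {
	fmt.Println(timeUnixNano(time.Time{}))                                 // 0
	fmt.Println(timeUnixNano(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC))) // 1704067200000000000
}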
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go
new file mode 100644
index 0000000000000000000000000000000000000000..60096200d392c560eb8ff509e96c8fdcac60381d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+
+// Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use.
+func Version() string {
+	return "0.45.0"
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go
new file mode 100644
index 0000000000000000000000000000000000000000..73463c91d5f426cffe6b2f5616c6396e2be606f5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go
@@ -0,0 +1,307 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
+
+import (
+	"bytes"
+	"compress/gzip"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"strconv"
+	"sync"
+	"time"
+
+	"google.golang.org/protobuf/proto"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry"
+	colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
+	metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
+type client struct {
+	// req is cloned for every upload the client makes.
+	req         *http.Request
+	compression Compression
+	requestFunc retry.RequestFunc
+	httpClient  *http.Client
+}
+
+// Keep it in sync with Go's DefaultTransport from net/http! We
+// keep our own copy to avoid handling a situation where the
+// DefaultTransport is overwritten with some different implementation
+// of http.RoundTripper or modified by another package.
+var ourTransport = &http.Transport{
+	Proxy: http.ProxyFromEnvironment,
+	DialContext: (&net.Dialer{
+		Timeout:   30 * time.Second,
+		KeepAlive: 30 * time.Second,
+	}).DialContext,
+	ForceAttemptHTTP2:     true,
+	MaxIdleConns:          100,
+	IdleConnTimeout:       90 * time.Second,
+	TLSHandshakeTimeout:   10 * time.Second,
+	ExpectContinueTimeout: 1 * time.Second,
+}
+
+// newClient creates a new HTTP metric client.
+func newClient(cfg oconf.Config) (*client, error) {
+	httpClient := &http.Client{
+		Transport: ourTransport,
+		Timeout:   cfg.Metrics.Timeout,
+	}
+	if cfg.Metrics.TLSCfg != nil {
+		transport := ourTransport.Clone()
+		transport.TLSClientConfig = cfg.Metrics.TLSCfg
+		httpClient.Transport = transport
+	}
+
+	u := &url.URL{
+		Scheme: "https",
+		Host:   cfg.Metrics.Endpoint,
+		Path:   cfg.Metrics.URLPath,
+	}
+	if cfg.Metrics.Insecure {
+		u.Scheme = "http"
+	}
+	// Body is set when this is cloned during upload.
+	req, err := http.NewRequest(http.MethodPost, u.String(), http.NoBody)
+	if err != nil {
+		return nil, err
+	}
+
+	userAgent := "OTel Go OTLP over HTTP/protobuf metrics exporter/" + Version()
+	req.Header.Set("User-Agent", userAgent)
+
+	if n := len(cfg.Metrics.Headers); n > 0 {
+		for k, v := range cfg.Metrics.Headers {
+			req.Header.Set(k, v)
+		}
+	}
+	req.Header.Set("Content-Type", "application/x-protobuf")
+
+	return &client{
+		compression: Compression(cfg.Metrics.Compression),
+		req:         req,
+		requestFunc: cfg.RetryConfig.RequestFunc(evaluate),
+		httpClient:  httpClient,
+	}, nil
+}
+
+// Shutdown shuts down the client, freeing all resources.
+func (c *client) Shutdown(ctx context.Context) error {
+	// The otlpmetric.Exporter synchronizes access to client methods and
+	// ensures this is called only once. The only thing that needs to be done
+	// here is to release any computational resources the client holds.
+
+	c.requestFunc = nil
+	c.httpClient = nil
+	return ctx.Err()
+}
+
+// UploadMetrics sends protoMetrics to the connected endpoint.
+//
+// Retryable errors from the server will be handled according to any
+// RetryConfig the client was created with.
+func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error {
+	// The otlpmetric.Exporter synchronizes access to client methods, and
+	// ensures this is not called after the Exporter is shut down. The only
+	// thing to do here is send the data.
+
+	pbRequest := &colmetricpb.ExportMetricsServiceRequest{
+		ResourceMetrics: []*metricpb.ResourceMetrics{protoMetrics},
+	}
+	body, err := proto.Marshal(pbRequest)
+	if err != nil {
+		return err
+	}
+	request, err := c.newRequest(ctx, body)
+	if err != nil {
+		return err
+	}
+
+	return c.requestFunc(ctx, func(iCtx context.Context) error {
+		select {
+		case <-iCtx.Done():
+			return iCtx.Err()
+		default:
+		}
+
+		request.reset(iCtx)
+		resp, err := c.httpClient.Do(request.Request)
+		var urlErr *url.Error
+		if errors.As(err, &urlErr) && urlErr.Temporary() {
+			return newResponseError(http.Header{})
+		}
+		if err != nil {
+			return err
+		}
+
+		var rErr error
+		switch sc := resp.StatusCode; {
+		case sc >= 200 && sc <= 299:
+			// Success, do not retry.
+
+			// Read the partial success message, if any.
+			var respData bytes.Buffer
+			if _, err := io.Copy(&respData, resp.Body); err != nil {
+				return err
+			}
+			if respData.Len() == 0 {
+				return nil
+			}
+
+			if resp.Header.Get("Content-Type") == "application/x-protobuf" {
+				var respProto colmetricpb.ExportMetricsServiceResponse
+				if err := proto.Unmarshal(respData.Bytes(), &respProto); err != nil {
+					return err
+				}
+
+				if respProto.PartialSuccess != nil {
+					msg := respProto.PartialSuccess.GetErrorMessage()
+					n := respProto.PartialSuccess.GetRejectedDataPoints()
+					if n != 0 || msg != "" {
+						err := internal.MetricPartialSuccessError(n, msg)
+						otel.Handle(err)
+					}
+				}
+			}
+			return nil
+		case sc == http.StatusTooManyRequests,
+			sc == http.StatusBadGateway,
+			sc == http.StatusServiceUnavailable,
+			sc == http.StatusGatewayTimeout:
+			// Retry-able failure.
+			rErr = newResponseError(resp.Header)
+
+			// Going to retry, drain the body to reuse the connection.
+			if _, err := io.Copy(io.Discard, resp.Body); err != nil {
+				_ = resp.Body.Close()
+				return err
+			}
+		default:
+			rErr = fmt.Errorf("failed to send metrics to %s: %s", request.URL, resp.Status)
+		}
+
+		if err := resp.Body.Close(); err != nil {
+			return err
+		}
+		return rErr
+	})
+}
+
+var gzPool = sync.Pool{
+	New: func() interface{} {
+		w := gzip.NewWriter(io.Discard)
+		return w
+	},
+}
+
+func (c *client) newRequest(ctx context.Context, body []byte) (request, error) {
+	r := c.req.Clone(ctx)
+	req := request{Request: r}
+
+	switch c.compression {
+	case NoCompression:
+		r.ContentLength = (int64)(len(body))
+		req.bodyReader = bodyReader(body)
+	case GzipCompression:
+		// Ensure the content length is not used.
+		r.ContentLength = -1
+		r.Header.Set("Content-Encoding", "gzip")
+
+		gz := gzPool.Get().(*gzip.Writer)
+		defer gzPool.Put(gz)
+
+		var b bytes.Buffer
+		gz.Reset(&b)
+
+		if _, err := gz.Write(body); err != nil {
+			return req, err
+		}
+		// Close needs to be called to ensure the body is fully written.
+		if err := gz.Close(); err != nil {
+			return req, err
+		}
+
+		req.bodyReader = bodyReader(b.Bytes())
+	}
+
+	return req, nil
+}
+
+// bodyReader returns a closure returning a new reader for buf.
+func bodyReader(buf []byte) func() io.ReadCloser {
+	return func() io.ReadCloser {
+		return io.NopCloser(bytes.NewReader(buf))
+	}
+}
+
+// request wraps an http.Request with a resettable body reader.
+type request struct {
+	*http.Request
+
+	// bodyReader allows the same body to be used for multiple requests.
+	bodyReader func() io.ReadCloser
+}
+
+// reset reinitializes the request Body and uses ctx for the request.
+func (r *request) reset(ctx context.Context) {
+	r.Body = r.bodyReader()
+	r.Request = r.Request.WithContext(ctx)
+}
+
+// retryableError represents a request failure that can be retried.
+type retryableError struct {
+	throttle int64
+}
+
+// newResponseError returns a retryableError and will extract any explicit
+// throttle delay contained in headers.
+func newResponseError(header http.Header) error {
+	var rErr retryableError
+	if v := header.Get("Retry-After"); v != "" {
+		if t, err := strconv.ParseInt(v, 10, 64); err == nil {
+			rErr.throttle = t
+		}
+	}
+	return rErr
+}
+
+func (e retryableError) Error() string {
+	return "retry-able request failure"
+}
+
+// evaluate returns whether err is retryable. If it is and it includes an explicit
+// throttling delay, that delay is also returned.
+func evaluate(err error) (bool, time.Duration) {
+	if err == nil {
+		return false, 0
+	}
+
+	rErr, ok := err.(retryableError)
+	if !ok {
+		return false, 0
+	}
+
+	return true, time.Duration(rErr.throttle)
+}
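
A standalone sketch of the response classification above: 2xx succeeds, four status codes are retryable, and a Retry-After header supplies an explicit throttle. Interpreting the header value as whole seconds follows RFC 9110; the vendored code stores the raw parsed integer:

package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// classify reports whether an export response status is retryable and any
// explicit throttle delay carried in the Retry-After header.
func classify(status int, header http.Header) (retryable bool, throttle time.Duration) {
	switch status {
	case http.StatusTooManyRequests,
		http.StatusBadGateway,
		http.StatusServiceUnavailable,
		http.StatusGatewayTimeout:
		if v := header.Get("Retry-After"); v != "" {
			if s, err := strconv.ParseInt(v, 10, 64); err == nil {
				throttle = time.Duration(s) * time.Second
			}
		}
		return true, throttle
	default:
		return false, 0
	}
}

func main() {
	h := http.Header{}
	h.Set("Retry-After", "15")
	fmt.Println(classify(http.StatusServiceUnavailable, h)) // true 15s
}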
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go
new file mode 100644
index 0000000000000000000000000000000000000000..6eae3e1bbdac089ad9a50a870c83689b88ac4a0f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go
@@ -0,0 +1,199 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
+
+import (
+	"crypto/tls"
+	"time"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry"
+	"go.opentelemetry.io/otel/sdk/metric"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression oconf.Compression
+
+const (
+	// NoCompression tells the driver to send payloads without
+	// compression.
+	NoCompression = Compression(oconf.NoCompression)
+	// GzipCompression tells the driver to send payloads after
+	// compressing them with gzip.
+	GzipCompression = Compression(oconf.GzipCompression)
+)
+
+// Option applies an option to the Exporter.
+type Option interface {
+	applyHTTPOption(oconf.Config) oconf.Config
+}
+
+func asHTTPOptions(opts []Option) []oconf.HTTPOption {
+	converted := make([]oconf.HTTPOption, len(opts))
+	for i, o := range opts {
+		converted[i] = oconf.NewHTTPOption(o.applyHTTPOption)
+	}
+	return converted
+}
+
+// RetryConfig defines configuration for retrying the export of metric data
+// that failed.
+type RetryConfig retry.Config
+
+type wrappedOption struct {
+	oconf.HTTPOption
+}
+
+func (w wrappedOption) applyHTTPOption(cfg oconf.Config) oconf.Config {
+	return w.ApplyHTTPOption(cfg)
+}
+
+// WithEndpoint sets the target endpoint the Exporter will connect to. This
+// endpoint is specified as a host and optional port; no path or scheme should
+// be included (see WithInsecure and WithURLPath).
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4318" will be used.
+func WithEndpoint(endpoint string) Option {
+	return wrappedOption{oconf.WithEndpoint(endpoint)}
+}
+
+// WithCompression sets the compression strategy the Exporter will use to
+// compress the HTTP body.
+//
+// If the OTEL_EXPORTER_OTLP_COMPRESSION or
+// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION environment variable is set, and
+// this option is not passed, that variable value will be used. That value can
+// be either "none" or "gzip". If both are set,
+// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no compression strategy will be used.
+func WithCompression(compression Compression) Option {
+	return wrappedOption{oconf.WithCompression(oconf.Compression(compression))}
+}
+
+// WithURLPath sets the URL path the Exporter will send requests to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, the path
+// contained in that variable value will be used. If both are set,
+// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "/v1/metrics" will be used.
+func WithURLPath(urlPath string) Option {
+	return wrappedOption{oconf.WithURLPath(urlPath)}
+}
+
+// WithTLSClientConfig sets the TLS configuration the Exporter will use for
+// HTTP requests.
+//
+// If the OTEL_EXPORTER_OTLP_CERTIFICATE or
+// OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE environment variable is set, and
+// this option is not passed, that variable value will be used. The value will
+// be parsed as the filepath of the TLS certificate chain to use. If both are
+// set, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, the system default configuration is used.
+func WithTLSClientConfig(tlsCfg *tls.Config) Option {
+	return wrappedOption{oconf.WithTLSClientConfig(tlsCfg)}
+}
+
+// WithInsecure disables client transport security for the Exporter's HTTP
+// connection.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used to determine client security. If the endpoint has a
+// scheme of "http" or "unix" client security will be disabled. If both are
+// set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, client security will be used.
+func WithInsecure() Option {
+	return wrappedOption{oconf.WithInsecure()}
+}
+
+// WithHeaders will send the provided headers with each HTTP request.
+//
+// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_METRICS_HEADERS
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as a list of key value pairs.
+// These pairs are expected to be in the W3C Correlation-Context format
+// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If
+// both are set, OTEL_EXPORTER_OTLP_METRICS_HEADERS will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no user headers will be set.
+func WithHeaders(headers map[string]string) Option {
+	return wrappedOption{oconf.WithHeaders(headers)}
+}
+
+// WithTimeout sets the max amount of time an Exporter will attempt an export.
+//
+// This takes precedence over any retry settings defined by WithRetry. Once
+// this time limit has been reached the export is abandoned and the metric
+// data is dropped.
+//
+// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_METRICS_TIMEOUT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as an integer representing the
+// timeout in milliseconds. If both are set,
+// OTEL_EXPORTER_OTLP_METRICS_TIMEOUT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, a timeout of 10 seconds will be used.
+func WithTimeout(duration time.Duration) Option {
+	return wrappedOption{oconf.WithTimeout(duration)}
+}
+
+// WithRetry sets the retry policy for transient retryable errors that are
+// returned by the target endpoint.
+//
+// If the target endpoint responds with not only a retryable error, but
+// explicitly returns a backoff time in the response, that time will take
+// precedence over these settings.
+//
+// If unset, the default retry policy will be used. It will retry the export
+// 5 seconds after receiving a retryable error and increase exponentially
+// after each error for no more than a total time of 1 minute.
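+//
+// For example, a caller wanting a tighter retry budget than the default might
+// pass (the values here are illustrative, not defaults):
+//
+//	otlpmetrichttp.WithRetry(otlpmetrichttp.RetryConfig{
+//		Enabled:         true,
+//		InitialInterval: time.Second,
+//		MaxInterval:     10 * time.Second,
+//		MaxElapsedTime:  30 * time.Second,
+//	})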
+func WithRetry(rc RetryConfig) Option {
+	return wrappedOption{oconf.WithRetry(retry.Config(rc))}
+}
+
+// WithTemporalitySelector sets the TemporalitySelector the client will use to
+// determine the Temporality of an instrument based on its kind. If this option
+// is not used, the client will use the DefaultTemporalitySelector from the
+// go.opentelemetry.io/otel/sdk/metric package.
+func WithTemporalitySelector(selector metric.TemporalitySelector) Option {
+	return wrappedOption{oconf.WithTemporalitySelector(selector)}
+}
+
+// WithAggregationSelector sets the AggregationSelector the client will use to
+// determine the aggregation to use for an instrument based on its kind. If
+// this option is not used, the reader will use the DefaultAggregationSelector
+// from the go.opentelemetry.io/otel/sdk/metric package, or the aggregation
+// explicitly passed for a view matching an instrument.
+func WithAggregationSelector(selector metric.AggregationSelector) Option {
+	return wrappedOption{oconf.WithAggregationSelector(selector)}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..94f8b250d3fe899358092010b30df1f2b778b204
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go
@@ -0,0 +1,93 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package otlpmetrichttp provides an OTLP metrics exporter using HTTP with protobuf payloads.
+By default the telemetry is sent to https://localhost:4318/v1/metrics.
+
+Exporter should be created using [New] and used with a [metric.PeriodicReader].
+
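+A minimal usage sketch, assuming a ctx context.Context is in scope and the SDK
+package go.opentelemetry.io/otel/sdk/metric is imported as metric:
+
+	exp, err := otlpmetrichttp.New(ctx)
+	if err != nil {
+		panic(err)
+	}
+	provider := metric.NewMeterProvider(metric.WithReader(metric.NewPeriodicReader(exp)))
+	defer func() { _ = provider.Shutdown(ctx) }()
+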
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT (default: "https://localhost:4318") -
+target base URL ("/v1/metrics" is appended) to which the exporter sends telemetry.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port and a path.
+The value should not contain a query string or fragment.
+The configuration can be overridden by OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+environment variable and by [WithEndpoint], [WithInsecure] options.
+
+OTEL_EXPORTER_OTLP_METRICS_ENDPOINT (default: "https://localhost:4318/v1/metrics") -
+target URL to which the exporter sends telemetry.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port and a path.
+The value should not contain a query string or fragment.
+The configuration can be overridden by [WithEndpoint], [WithInsecure], [WithURLPath] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_HEADERS (default: none) -
+key-value pairs used as headers associated with HTTP requests.
+The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_METRICS_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_METRICS_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION (default: none) -
+compression strategy the exporter uses to compress the HTTP body.
+Supported values: "gzip".
+OTEL_EXPORTER_OTLP_METRICS_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompression] option.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE (default: none) -
+filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE (default: none) -
+filepath to the client certificate/certificate chain, in PEM format, to use with the client's private key for mTLS communication.
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY (default: none) -
+filepath to the client's private key, in PEM format, to use for mTLS communication.
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE (default: "cumulative") -
+aggregation temporality to use on the basis of instrument kind. Supported values:
+  - "cumulative" - Cumulative aggregation temporality for all instrument kinds,
+  - "delta" - Delta aggregation temporality for Counter, Asynchronous Counter and Histogram instrument kinds;
+    Cumulative aggregation for UpDownCounter and Asynchronous UpDownCounter instrument kinds,
+  - "lowmemory" - Delta aggregation temporality for Synchronous Counter and Histogram instrument kinds;
+    Cumulative aggregation temporality for Synchronous UpDownCounter, Asynchronous Counter, and Asynchronous UpDownCounter instrument kinds.
+
+The configuration can be overridden by [WithTemporalitySelector] option.
+
+OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION (default: "explicit_bucket_histogram") -
+default aggregation to use for histogram instruments. Supported values:
+  - "explicit_bucket_histogram" - [Explicit Bucket Histogram Aggregation],
+  - "base2_exponential_bucket_histogram" - [Base2 Exponential Bucket Histogram Aggregation].
+
+The configuration can be overridden by [WithAggregationSelector] option.
+
+[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
+[Explicit Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#explicit-bucket-histogram-aggregation
+[Base2 Exponential Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#base2-exponential-bucket-histogram-aggregation
+*/
+package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go
new file mode 100644
index 0000000000000000000000000000000000000000..96991ede5c7cfac3571b6d7fac82544a54afe9f5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go
@@ -0,0 +1,162 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
+
+import (
+	"context"
+	"fmt"
+	"sync"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform"
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/metric"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+	metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
+// Exporter is an OpenTelemetry metric Exporter using protobufs over HTTP.
+type Exporter struct {
+	// Ensure synchronous access to the client across all functionality.
+	clientMu sync.Mutex
+	client   interface {
+		UploadMetrics(context.Context, *metricpb.ResourceMetrics) error
+		Shutdown(context.Context) error
+	}
+
+	temporalitySelector metric.TemporalitySelector
+	aggregationSelector metric.AggregationSelector
+
+	shutdownOnce sync.Once
+}
+
+func newExporter(c *client, cfg oconf.Config) (*Exporter, error) {
+	ts := cfg.Metrics.TemporalitySelector
+	if ts == nil {
+		ts = func(metric.InstrumentKind) metricdata.Temporality {
+			return metricdata.CumulativeTemporality
+		}
+	}
+
+	as := cfg.Metrics.AggregationSelector
+	if as == nil {
+		as = metric.DefaultAggregationSelector
+	}
+
+	return &Exporter{
+		client: c,
+
+		temporalitySelector: ts,
+		aggregationSelector: as,
+	}, nil
+}
+
+// Temporality returns the Temporality to use for an instrument kind.
+func (e *Exporter) Temporality(k metric.InstrumentKind) metricdata.Temporality {
+	return e.temporalitySelector(k)
+}
+
+// Aggregation returns the Aggregation to use for an instrument kind.
+func (e *Exporter) Aggregation(k metric.InstrumentKind) metric.Aggregation {
+	return e.aggregationSelector(k)
+}
+
+// Export transforms and transmits metric data to an OTLP receiver.
+//
+// This method returns an error if called after Shutdown.
+// This method returns an error if the passed context is canceled.
+func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+	defer global.Debug("OTLP/HTTP exporter export", "Data", rm)
+
+	otlpRm, err := transform.ResourceMetrics(rm)
+	// Best effort upload of transformable metrics.
+	e.clientMu.Lock()
+	upErr := e.client.UploadMetrics(ctx, otlpRm)
+	e.clientMu.Unlock()
+	if upErr != nil {
+		if err == nil {
+			return fmt.Errorf("failed to upload metrics: %w", upErr)
+		}
+		// Merge the two errors.
+		return fmt.Errorf("failed to upload incomplete metrics (%s): %w", err, upErr)
+	}
+	return err
+}
+
+// ForceFlush flushes any metric data held by an exporter.
+//
+// This method returns an error if called after Shutdown.
+// This method returns an error if the passed context is canceled.
+//
+// This method is safe to call concurrently.
+func (e *Exporter) ForceFlush(ctx context.Context) error {
+	// The exporter and client hold no state, nothing to flush.
+	return ctx.Err()
+}
+
+// Shutdown flushes all metric data held by an exporter and releases any held
+// computational resources.
+//
+// This method returns an error if called after Shutdown.
+// This method returns an error if the passed context is canceled.
+//
+// This method is safe to call concurrently.
+func (e *Exporter) Shutdown(ctx context.Context) error {
+	err := errShutdown
+	e.shutdownOnce.Do(func() {
+		e.clientMu.Lock()
+		client := e.client
+		e.client = shutdownClient{}
+		e.clientMu.Unlock()
+		err = client.Shutdown(ctx)
+	})
+	return err
+}
+
+var errShutdown = fmt.Errorf("HTTP exporter is shutdown")
+
+type shutdownClient struct{}
+
+func (c shutdownClient) err(ctx context.Context) error {
+	if err := ctx.Err(); err != nil {
+		return err
+	}
+	return errShutdown
+}
+
+func (c shutdownClient) UploadMetrics(ctx context.Context, _ *metricpb.ResourceMetrics) error {
+	return c.err(ctx)
+}
+
+func (c shutdownClient) Shutdown(ctx context.Context) error {
+	return c.err(ctx)
+}
+
+// MarshalLog returns logging data about the Exporter.
+func (e *Exporter) MarshalLog() interface{} {
+	return struct{ Type string }{Type: "OTLP/HTTP"}
+}
+
+// New returns an OpenTelemetry metric Exporter. The Exporter can be used with
+// a PeriodicReader to export OpenTelemetry metric data to an OTLP receiving
+// endpoint using protobufs over HTTP.
+func New(_ context.Context, opts ...Option) (*Exporter, error) {
+	cfg := oconf.NewHTTPConfig(asHTTPOptions(opts)...)
+	c, err := newClient(cfg)
+	if err != nil {
+		return nil, err
+	}
+	return newExporter(c, cfg)
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go
new file mode 100644
index 0000000000000000000000000000000000000000..9dfb55c41bb69d59f217546d8bfc3ad674336150
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go
@@ -0,0 +1,202 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig"
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+// ConfigFn is the generic function used to set a config.
+type ConfigFn func(*EnvOptionsReader)
+
+// EnvOptionsReader reads the required environment variables.
+type EnvOptionsReader struct {
+	GetEnv    func(string) string
+	ReadFile  func(string) ([]byte, error)
+	Namespace string
+}
+
+// Apply runs every ConfigFn.
+func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
+	for _, o := range opts {
+		o(e)
+	}
+}
+
+// GetEnvValue gets an OTLP environment variable value of the specified key
+// using the GetEnv function.
+// This function prepends the configured OTLP namespace to all key lookups.
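+// For example, with Namespace set to "OTEL_EXPORTER_OTLP" (the default used by
+// this package's oconf), GetEnvValue("TIMEOUT") reads the
+// OTEL_EXPORTER_OTLP_TIMEOUT environment variable.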
+func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
+	v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
+	return v, v != ""
+}
+
+// WithString retrieves the specified config and passes it to ConfigFn as a string.
+func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			fn(v)
+		}
+	}
+}
+
+// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
+func WithBool(n string, fn func(bool)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			b := strings.ToLower(v) == "true"
+			fn(b)
+		}
+	}
+}
+
+// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
+func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			d, err := strconv.Atoi(v)
+			if err != nil {
+				global.Error(err, "parse duration", "input", v)
+				return
+			}
+			fn(time.Duration(d) * time.Millisecond)
+		}
+	}
+}
+
+// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
+func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			fn(stringToHeader(v))
+		}
+	}
+}
+
+// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
+func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			u, err := url.Parse(v)
+			if err != nil {
+				global.Error(err, "parse url", "input", v)
+				return
+			}
+			fn(u)
+		}
+	}
+}
+
+// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and passed to fn.
+func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			b, err := e.ReadFile(v)
+			if err != nil {
+				global.Error(err, "read tls ca cert file", "file", v)
+				return
+			}
+			c, err := createCertPool(b)
+			if err != nil {
+				global.Error(err, "create tls cert pool")
+				return
+			}
+			fn(c)
+		}
+	}
+}
+
+// WithClientCert returns a ConfigFn that reads the environment variables nc and nk as filepaths to a client certificate and key pair. If both exist, they are parsed as a crypto/tls.Certificate and passed to fn.
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		vc, okc := e.GetEnvValue(nc)
+		vk, okk := e.GetEnvValue(nk)
+		if !okc || !okk {
+			return
+		}
+		cert, err := e.ReadFile(vc)
+		if err != nil {
+			global.Error(err, "read tls client cert", "file", vc)
+			return
+		}
+		key, err := e.ReadFile(vk)
+		if err != nil {
+			global.Error(err, "read tls client key", "file", vk)
+			return
+		}
+		crt, err := tls.X509KeyPair(cert, key)
+		if err != nil {
+			global.Error(err, "create tls client key pair")
+			return
+		}
+		fn(crt)
+	}
+}
+
+func keyWithNamespace(ns, key string) string {
+	if ns == "" {
+		return key
+	}
+	return fmt.Sprintf("%s_%s", ns, key)
+}
+
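+// stringToHeader parses a comma-separated list of key=value pairs into a
+// header map. Names and values are URL path-unescaped and whitespace-trimmed,
+// and pairs missing an '=' are logged and skipped. For example (illustrative
+// input, not from the source), "api-key=secret,tenant=a%20b" yields
+// map[string]string{"api-key": "secret", "tenant": "a b"}.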
+func stringToHeader(value string) map[string]string {
+	headersPairs := strings.Split(value, ",")
+	headers := make(map[string]string)
+
+	for _, header := range headersPairs {
+		n, v, found := strings.Cut(header, "=")
+		if !found {
+			global.Error(errors.New("missing '="), "parse headers", "input", header)
+			continue
+		}
+		name, err := url.PathUnescape(n)
+		if err != nil {
+			global.Error(err, "escape header key", "key", n)
+			continue
+		}
+		trimmedName := strings.TrimSpace(name)
+		value, err := url.PathUnescape(v)
+		if err != nil {
+			global.Error(err, "escape header value", "value", v)
+			continue
+		}
+		trimmedValue := strings.TrimSpace(value)
+
+		headers[trimmedName] = trimmedValue
+	}
+
+	return headers
+}
+
+func createCertPool(certBytes []byte) (*x509.CertPool, error) {
+	cp := x509.NewCertPool()
+	if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+		return nil, errors.New("failed to append certificate to the cert pool")
+	}
+	return cp, nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go
new file mode 100644
index 0000000000000000000000000000000000000000..002c8b36b2e9b3ca4ef68fd8ae9fde1fd914d6df
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go
@@ -0,0 +1,42 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal"
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig\"}" --out=oconf/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl "--data={}" --out=oconf/envconfig_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry\"}" --out=oconf/options.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig\"}" --out=oconf/options_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl "--data={}" --out=oconf/optiontypes.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl "--data={}" --out=oconf/tls.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={}" --out=otest/client.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal\"}" --out=otest/client_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/collector.go.tmpl "--data={\"oconfImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf\"}" --out=otest/collector.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl "--data={}" --out=transform/attribute.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl "--data={}" --out=transform/attribute_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error.go.tmpl "--data={}" --out=transform/error.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl "--data={}" --out=transform/error_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl "--data={}" --out=transform/metricdata.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl "--data={}" --out=transform/metricdata_test.go
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go
new file mode 100644
index 0000000000000000000000000000000000000000..2bab35be6c4cf5b8cea0fa369910361812f8b06c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go
@@ -0,0 +1,221 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"net/url"
+	"os"
+	"path"
+	"strings"
+	"time"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig"
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/metric"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// DefaultEnvOptionsReader is the default environment options reader.
+var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
+	GetEnv:    os.Getenv,
+	ReadFile:  os.ReadFile,
+	Namespace: "OTEL_EXPORTER_OTLP",
+}
+
+// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
+func ApplyGRPCEnvConfigs(cfg Config) Config {
+	opts := getOptionsFromEnv()
+	for _, opt := range opts {
+		cfg = opt.ApplyGRPCOption(cfg)
+	}
+	return cfg
+}
+
+// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
+func ApplyHTTPEnvConfigs(cfg Config) Config {
+	opts := getOptionsFromEnv()
+	for _, opt := range opts {
+		cfg = opt.ApplyHTTPOption(cfg)
+	}
+	return cfg
+}
+
+func getOptionsFromEnv() []GenericOption {
+	opts := []GenericOption{}
+
+	tlsConf := &tls.Config{}
+	DefaultEnvOptionsReader.Apply(
+		envconfig.WithURL("ENDPOINT", func(u *url.URL) {
+			opts = append(opts, withEndpointScheme(u))
+			opts = append(opts, newSplitOption(func(cfg Config) Config {
+				cfg.Metrics.Endpoint = u.Host
+				// For OTLP/HTTP endpoint URLs without a per-signal
+				// configuration, the passed endpoint is used as a base URL
+				// and the signals are sent to these paths relative to that.
+				cfg.Metrics.URLPath = path.Join(u.Path, DefaultMetricsPath)
+				return cfg
+			}, withEndpointForGRPC(u)))
+		}),
+		envconfig.WithURL("METRICS_ENDPOINT", func(u *url.URL) {
+			opts = append(opts, withEndpointScheme(u))
+			opts = append(opts, newSplitOption(func(cfg Config) Config {
+				cfg.Metrics.Endpoint = u.Host
+				// For endpoint URLs for OTLP/HTTP per-signal variables, the
+				// URL MUST be used as-is without any modification. The only
+				// exception is that if a URL contains no path part, the root
+				// path / MUST be used.
+				path := u.Path
+				if path == "" {
+					path = "/"
+				}
+				cfg.Metrics.URLPath = path
+				return cfg
+			}, withEndpointForGRPC(u)))
+		}),
+		envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+		envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+		envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+		envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+		envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+		envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+		withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
+		envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+		envconfig.WithHeaders("METRICS_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+		WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+		WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+		envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+		envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+		withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }),
+		withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }),
+	)
+
+	return opts
+}
+
+func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
+	return func(cfg Config) Config {
+		// For OTLP/gRPC endpoints, this is the target to which the
+		// exporter is going to send telemetry.
+		cfg.Metrics.Endpoint = path.Join(u.Host, u.Path)
+		return cfg
+	}
+}
+
+// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
+func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
+	return func(e *envconfig.EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			cp := NoCompression
+			if v == "gzip" {
+				cp = GzipCompression
+			}
+
+			fn(cp)
+		}
+	}
+}
+
+func withEndpointScheme(u *url.URL) GenericOption {
+	switch strings.ToLower(u.Scheme) {
+	case "http", "unix":
+		return WithInsecure()
+	default:
+		return WithSecure()
+	}
+}
+
+// revive:disable-next-line:flag-parameter
+func withInsecure(b bool) GenericOption {
+	if b {
+		return WithInsecure()
+	}
+	return WithSecure()
+}
+
+func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
+	return func(e *envconfig.EnvOptionsReader) {
+		if c.RootCAs != nil || len(c.Certificates) > 0 {
+			fn(c)
+		}
+	}
+}
+
+func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) func(e *envconfig.EnvOptionsReader) {
+	return func(e *envconfig.EnvOptionsReader) {
+		if s, ok := e.GetEnvValue(n); ok {
+			switch strings.ToLower(s) {
+			case "cumulative":
+				fn(cumulativeTemporality)
+			case "delta":
+				fn(deltaTemporality)
+			case "lowmemory":
+				fn(lowMemory)
+			default:
+				global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s)
+			}
+		}
+	}
+}
+
+func cumulativeTemporality(metric.InstrumentKind) metricdata.Temporality {
+	return metricdata.CumulativeTemporality
+}
+
+func deltaTemporality(ik metric.InstrumentKind) metricdata.Temporality {
+	switch ik {
+	case metric.InstrumentKindCounter, metric.InstrumentKindHistogram, metric.InstrumentKindObservableCounter:
+		return metricdata.DeltaTemporality
+	default:
+		return metricdata.CumulativeTemporality
+	}
+}
+
+func lowMemory(ik metric.InstrumentKind) metricdata.Temporality {
+	switch ik {
+	case metric.InstrumentKindCounter, metric.InstrumentKindHistogram:
+		return metricdata.DeltaTemporality
+	default:
+		return metricdata.CumulativeTemporality
+	}
+}
+
+func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e *envconfig.EnvOptionsReader) {
+	return func(e *envconfig.EnvOptionsReader) {
+		if s, ok := e.GetEnvValue(n); ok {
+			switch strings.ToLower(s) {
+			case "explicit_bucket_histogram":
+				fn(metric.DefaultAggregationSelector)
+			case "base2_exponential_bucket_histogram":
+				fn(func(kind metric.InstrumentKind) metric.Aggregation {
+					if kind == metric.InstrumentKindHistogram {
+						return metric.AggregationBase2ExponentialHistogram{
+							MaxSize:  160,
+							MaxScale: 20,
+							NoMinMax: false,
+						}
+					}
+					return metric.DefaultAggregationSelector(kind)
+				})
+			default:
+				global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s)
+			}
+		}
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go
new file mode 100644
index 0000000000000000000000000000000000000000..59468b9a5ed6c978919b8af7e8b70ccd0ad16406
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go
@@ -0,0 +1,353 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+
+import (
+	"crypto/tls"
+	"fmt"
+	"path"
+	"strings"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/backoff"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/credentials/insecure"
+	"google.golang.org/grpc/encoding/gzip"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry"
+	"go.opentelemetry.io/otel/sdk/metric"
+)
+
+const (
+	// DefaultMaxAttempts describes how many times the driver
+	// should retry the sending of the payload in case of a
+	// retryable error.
+	DefaultMaxAttempts int = 5
+	// DefaultMetricsPath is a default URL path for endpoint that
+	// receives metrics.
+	DefaultMetricsPath string = "/v1/metrics"
+	// DefaultBackoff is a default base backoff time used in the
+	// exponential backoff strategy.
+	DefaultBackoff time.Duration = 300 * time.Millisecond
+	// DefaultTimeout is a default max waiting time for the backend to process
+	// each span or metrics batch.
+	DefaultTimeout time.Duration = 10 * time.Second
+)
+
+type (
+	SignalConfig struct {
+		Endpoint    string
+		Insecure    bool
+		TLSCfg      *tls.Config
+		Headers     map[string]string
+		Compression Compression
+		Timeout     time.Duration
+		URLPath     string
+
+		// gRPC configurations
+		GRPCCredentials credentials.TransportCredentials
+
+		TemporalitySelector metric.TemporalitySelector
+		AggregationSelector metric.AggregationSelector
+	}
+
+	Config struct {
+		// Signal specific configurations
+		Metrics SignalConfig
+
+		RetryConfig retry.Config
+
+		// gRPC configurations
+		ReconnectionPeriod time.Duration
+		ServiceConfig      string
+		DialOptions        []grpc.DialOption
+		GRPCConn           *grpc.ClientConn
+	}
+)
+
+// NewHTTPConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default HTTP config values.
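+// Note that environment configuration is applied before opts, so a
+// programmatic option such as WithEndpoint overrides the corresponding
+// OTEL_EXPORTER_OTLP_* environment variable.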
+func NewHTTPConfig(opts ...HTTPOption) Config {
+	cfg := Config{
+		Metrics: SignalConfig{
+			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
+			URLPath:     DefaultMetricsPath,
+			Compression: NoCompression,
+			Timeout:     DefaultTimeout,
+
+			TemporalitySelector: metric.DefaultTemporalitySelector,
+			AggregationSelector: metric.DefaultAggregationSelector,
+		},
+		RetryConfig: retry.DefaultConfig,
+	}
+	cfg = ApplyHTTPEnvConfigs(cfg)
+	for _, opt := range opts {
+		cfg = opt.ApplyHTTPOption(cfg)
+	}
+	cfg.Metrics.URLPath = cleanPath(cfg.Metrics.URLPath, DefaultMetricsPath)
+	return cfg
+}
+
+// cleanPath returns a path with all spaces trimmed and all redundancies
+// removed. If urlPath is empty or cleaning it results in an empty string,
+// defaultPath is returned instead.
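+//
+// For example (illustrative inputs): cleanPath(" /foo/../bar/ ", "/v1/metrics")
+// returns "/bar", cleanPath("v1/metrics", "/v1/metrics") returns "/v1/metrics"
+// (a leading "/" is added), and cleanPath("", "/v1/metrics") returns the
+// default "/v1/metrics".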
+func cleanPath(urlPath string, defaultPath string) string {
+	tmp := path.Clean(strings.TrimSpace(urlPath))
+	if tmp == "." {
+		return defaultPath
+	}
+	if !path.IsAbs(tmp) {
+		tmp = fmt.Sprintf("/%s", tmp)
+	}
+	return tmp
+}
+
+// NewGRPCConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default gRPC config values.
+func NewGRPCConfig(opts ...GRPCOption) Config {
+	cfg := Config{
+		Metrics: SignalConfig{
+			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
+			URLPath:     DefaultMetricsPath,
+			Compression: NoCompression,
+			Timeout:     DefaultTimeout,
+
+			TemporalitySelector: metric.DefaultTemporalitySelector,
+			AggregationSelector: metric.DefaultAggregationSelector,
+		},
+		RetryConfig: retry.DefaultConfig,
+	}
+	cfg = ApplyGRPCEnvConfigs(cfg)
+	for _, opt := range opts {
+		cfg = opt.ApplyGRPCOption(cfg)
+	}
+
+	if cfg.ServiceConfig != "" {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
+	}
+	// Prioritize GRPCCredentials over Insecure (passing both is an error).
+	if cfg.Metrics.GRPCCredentials != nil {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials))
+	} else if cfg.Metrics.Insecure {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	} else {
+		// Default to using the host's root CA.
+		creds := credentials.NewTLS(nil)
+		cfg.Metrics.GRPCCredentials = creds
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
+	}
+	if cfg.Metrics.Compression == GzipCompression {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
+	}
+	if cfg.ReconnectionPeriod != 0 {
+		p := grpc.ConnectParams{
+			Backoff:           backoff.DefaultConfig,
+			MinConnectTimeout: cfg.ReconnectionPeriod,
+		}
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
+	}
+
+	return cfg
+}
+
+type (
+	// GenericOption applies an option to the HTTP or gRPC driver.
+	GenericOption interface {
+		ApplyHTTPOption(Config) Config
+		ApplyGRPCOption(Config) Config
+
+		// A private method to prevent users implementing the
+		// interface and so future additions to it will not
+		// violate compatibility.
+		private()
+	}
+
+	// HTTPOption applies an option to the HTTP driver.
+	HTTPOption interface {
+		ApplyHTTPOption(Config) Config
+
+		// A private method to prevent users implementing the
+		// interface and so future additions to it will not
+		// violate compatibility.
+		private()
+	}
+
+	// GRPCOption applies an option to the gRPC driver.
+	GRPCOption interface {
+		ApplyGRPCOption(Config) Config
+
+		// A private method to prevent users implementing the
+		// interface and so future additions to it will not
+		// violate compatibility.
+		private()
+	}
+)
+
+// genericOption is an option that applies the same logic
+// for both gRPC and HTTP.
+type genericOption struct {
+	fn func(Config) Config
+}
+
+func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
+	return g.fn(cfg)
+}
+
+func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
+	return g.fn(cfg)
+}
+
+func (genericOption) private() {}
+
+func newGenericOption(fn func(cfg Config) Config) GenericOption {
+	return &genericOption{fn: fn}
+}
+
+// splitOption is an option that applies different logic
+// for gRPC and HTTP.
+type splitOption struct {
+	httpFn func(Config) Config
+	grpcFn func(Config) Config
+}
+
+func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
+	return g.grpcFn(cfg)
+}
+
+func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
+	return g.httpFn(cfg)
+}
+
+func (splitOption) private() {}
+
+func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
+	return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
+}
+
+// httpOption is an option that is only applied to the HTTP driver.
+type httpOption struct {
+	fn func(Config) Config
+}
+
+func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
+	return h.fn(cfg)
+}
+
+func (httpOption) private() {}
+
+func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
+	return &httpOption{fn: fn}
+}
+
+// grpcOption is an option that is only applied to the gRPC driver.
+type grpcOption struct {
+	fn func(Config) Config
+}
+
+func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
+	return h.fn(cfg)
+}
+
+func (grpcOption) private() {}
+
+func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
+	return &grpcOption{fn: fn}
+}
+
+// Generic Options
+
+func WithEndpoint(endpoint string) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Metrics.Endpoint = endpoint
+		return cfg
+	})
+}
+
+func WithCompression(compression Compression) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Metrics.Compression = compression
+		return cfg
+	})
+}
+
+func WithURLPath(urlPath string) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Metrics.URLPath = urlPath
+		return cfg
+	})
+}
+
+func WithRetry(rc retry.Config) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.RetryConfig = rc
+		return cfg
+	})
+}
+
+func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
+	return newSplitOption(func(cfg Config) Config {
+		cfg.Metrics.TLSCfg = tlsCfg.Clone()
+		return cfg
+	}, func(cfg Config) Config {
+		cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg)
+		return cfg
+	})
+}
+
+func WithInsecure() GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Metrics.Insecure = true
+		return cfg
+	})
+}
+
+func WithSecure() GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Metrics.Insecure = false
+		return cfg
+	})
+}
+
+func WithHeaders(headers map[string]string) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Metrics.Headers = headers
+		return cfg
+	})
+}
+
+func WithTimeout(duration time.Duration) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Metrics.Timeout = duration
+		return cfg
+	})
+}
+
+func WithTemporalitySelector(selector metric.TemporalitySelector) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Metrics.TemporalitySelector = selector
+		return cfg
+	})
+}
+
+func WithAggregationSelector(selector metric.AggregationSelector) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Metrics.AggregationSelector = selector
+		return cfg
+	})
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go
new file mode 100644
index 0000000000000000000000000000000000000000..f8805e31d622f534785e2561e33b9edc74f88a9e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go
@@ -0,0 +1,58 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+
+import "time"
+
+const (
+	// DefaultCollectorGRPCPort is the default gRPC port of the collector.
+	DefaultCollectorGRPCPort uint16 = 4317
+	// DefaultCollectorHTTPPort is the default HTTP port of the collector.
+	DefaultCollectorHTTPPort uint16 = 4318
+	// DefaultCollectorHost is the host address the Exporter will attempt to
+	// connect to if no collector address is provided.
+	DefaultCollectorHost string = "localhost"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression int
+
+const (
+	// NoCompression tells the driver to send payloads without
+	// compression.
+	NoCompression Compression = iota
+	// GzipCompression tells the driver to send payloads after
+	// compressing them with gzip.
+	GzipCompression
+)
+
+// RetrySettings defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type RetrySettings struct {
+	// Enabled indicates whether to retry sending batches in case of export failure.
+	Enabled bool
+	// InitialInterval the time to wait after the first failure before retrying.
+	InitialInterval time.Duration
+	// MaxInterval is the upper bound on backoff interval. Once this value is reached the delay between
+	// consecutive retries will always be `MaxInterval`.
+	MaxInterval time.Duration
+	// MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch.
+	// Once this value is reached, the data is discarded.
+	MaxElapsedTime time.Duration
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go
new file mode 100644
index 0000000000000000000000000000000000000000..15cbec2585047eea65c83ecdd583df73584905b6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go
@@ -0,0 +1,49 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"os"
+)
+
+// ReadTLSConfigFromFile reads a PEM certificate file and creates
+// a tls.Config that will use this certificate to verify a server certificate.
+func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
+	b, err := os.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return CreateTLSConfig(b)
+}
+
+// CreateTLSConfig creates a tls.Config from raw certificate bytes
+// to verify a server certificate.
+func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
+	cp := x509.NewCertPool()
+	if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+		return nil, errors.New("failed to append certificate to the cert pool")
+	}
+
+	return &tls.Config{
+		RootCAs: cp,
+	}, nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go
new file mode 100644
index 0000000000000000000000000000000000000000..584785778accb3c8cbda414ec7f516fc930e2515
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go
@@ -0,0 +1,67 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/partialsuccess.go
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal"
+
+import "fmt"
+
+// PartialSuccess represents the underlying error for all handling
+// of OTLP partial success messages. Use `errors.Is(err,
+// PartialSuccess{})` to test whether an error passed to the OTel
+// error handler belongs to this category.
+type PartialSuccess struct {
+	ErrorMessage  string
+	RejectedItems int64
+	RejectedKind  string
+}
+
+var _ error = PartialSuccess{}
+
+// Error implements the error interface.
+func (ps PartialSuccess) Error() string {
+	msg := ps.ErrorMessage
+	if msg == "" {
+		msg = "empty message"
+	}
+	return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
+}
+
+// Is supports the errors.Is() interface.
+func (ps PartialSuccess) Is(err error) bool {
+	_, ok := err.(PartialSuccess)
+	return ok
+}
+
+// TracePartialSuccessError returns an error describing a partial success
+// response for the trace signal.
+func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
+	return PartialSuccess{
+		ErrorMessage:  errorMessage,
+		RejectedItems: itemsRejected,
+		RejectedKind:  "spans",
+	}
+}
+
+// MetricPartialSuccessError returns an error describing a partial success
+// response for the metric signal.
+func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
+	return PartialSuccess{
+		ErrorMessage:  errorMessage,
+		RejectedItems: itemsRejected,
+		RejectedKind:  "metric data points",
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go
new file mode 100644
index 0000000000000000000000000000000000000000..c8d2a6ddfc77218e3b474337265a99da5c74bc55
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go
@@ -0,0 +1,156 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/retry/retry.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package retry provides request retry functionality that can perform
+// configurable exponential backoff for transient errors and honor any
+// explicit throttle responses received.
+package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry"
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/cenkalti/backoff/v4"
+)
+
+// DefaultConfig contains the recommended defaults to use.
+var DefaultConfig = Config{
+	Enabled:         true,
+	InitialInterval: 5 * time.Second,
+	MaxInterval:     30 * time.Second,
+	MaxElapsedTime:  time.Minute,
+}
+
+// Config defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type Config struct {
+	// Enabled indicates whether to retry sending batches in case of
+	// export failure.
+	Enabled bool
+	// InitialInterval the time to wait after the first failure before
+	// retrying.
+	InitialInterval time.Duration
+	// MaxInterval is the upper bound on backoff interval. Once this value is
+	// reached the delay between consecutive retries will always be
+	// `MaxInterval`.
+	MaxInterval time.Duration
+	// MaxElapsedTime is the maximum amount of time (including retries) spent
+	// trying to send a request/batch.  Once this value is reached, the data
+	// is discarded.
+	MaxElapsedTime time.Duration
+}
+
+// RequestFunc wraps a request with retry logic.
+type RequestFunc func(context.Context, func(context.Context) error) error
+
+// EvaluateFunc returns whether an error is retryable and whether an explicit
+// throttle duration included in the error should be honored.
+//
+// The function must return true for the first return parameter if the error
+// argument is retryable, otherwise it must return false.
+//
+// The function must return a non-zero time.Duration if the error contains
+// explicit throttle duration that should be honored, otherwise it must return
+// a zero valued time.Duration.
+type EvaluateFunc func(error) (bool, time.Duration)
+
+// RequestFunc returns a RequestFunc that uses the evaluate function to
+// determine if requests can be retried, following the exponential backoff
+// configuration of c.
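+//
+// A hypothetical caller sketch (isRetryable and doExport are illustrative
+// names, not part of this package):
+//
+//	send := cfg.RequestFunc(func(err error) (bool, time.Duration) {
+//		return isRetryable(err), 0 // retryable or not; no explicit throttle hint
+//	})
+//	err := send(ctx, func(ctx context.Context) error { return doExport(ctx) })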
+func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
+	if !c.Enabled {
+		return func(ctx context.Context, fn func(context.Context) error) error {
+			return fn(ctx)
+		}
+	}
+
+	return func(ctx context.Context, fn func(context.Context) error) error {
+		// Do not use NewExponentialBackOff since it calls Reset and the code here
+		// must call Reset after changing the InitialInterval (this saves an
+		// unnecessary call to Now).
+		b := &backoff.ExponentialBackOff{
+			InitialInterval:     c.InitialInterval,
+			RandomizationFactor: backoff.DefaultRandomizationFactor,
+			Multiplier:          backoff.DefaultMultiplier,
+			MaxInterval:         c.MaxInterval,
+			MaxElapsedTime:      c.MaxElapsedTime,
+			Stop:                backoff.Stop,
+			Clock:               backoff.SystemClock,
+		}
+		b.Reset()
+
+		for {
+			err := fn(ctx)
+			if err == nil {
+				return nil
+			}
+
+			retryable, throttle := evaluate(err)
+			if !retryable {
+				return err
+			}
+
+			bOff := b.NextBackOff()
+			if bOff == backoff.Stop {
+				return fmt.Errorf("max retry time elapsed: %w", err)
+			}
+
+			// Wait for the greater of the backoff or throttle delay.
+			var delay time.Duration
+			if bOff > throttle {
+				delay = bOff
+			} else {
+				elapsed := b.GetElapsedTime()
+				if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
+					return fmt.Errorf("max retry time would elapse: %w", err)
+				}
+				delay = throttle
+			}
+
+			if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
+				return fmt.Errorf("%w: %s", ctxErr, err)
+			}
+		}
+	}
+}
+
+// Allow override for testing.
+var waitFunc = wait
+
+// wait takes the caller's context and the amount of time to wait. It will
+// return nil if the timer fires before or at the same time as the context's
+// deadline.  This indicates that the call can be retried.
+func wait(ctx context.Context, delay time.Duration) error {
+	timer := time.NewTimer(delay)
+	defer timer.Stop()
+
+	select {
+	case <-ctx.Done():
+		// Handle the case where the timer and context deadline end
+		// simultaneously by prioritizing the timer expiration nil value
+		// response.
+		select {
+		case <-timer.C:
+		default:
+			return ctx.Err()
+		}
+	case <-timer.C:
+	}
+
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go
new file mode 100644
index 0000000000000000000000000000000000000000..7fcc144c1cdb0e6160f682e9d6c237f8d3a82826
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go
@@ -0,0 +1,155 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform"
+
+import (
+	"go.opentelemetry.io/otel/attribute"
+	cpb "go.opentelemetry.io/proto/otlp/common/v1"
+)
+
+// AttrIter transforms an attribute iterator into OTLP key-values.
+func AttrIter(iter attribute.Iterator) []*cpb.KeyValue {
+	l := iter.Len()
+	if l == 0 {
+		return nil
+	}
+
+	out := make([]*cpb.KeyValue, 0, l)
+	for iter.Next() {
+		out = append(out, KeyValue(iter.Attribute()))
+	}
+	return out
+}
+
+// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
+func KeyValues(attrs []attribute.KeyValue) []*cpb.KeyValue {
+	if len(attrs) == 0 {
+		return nil
+	}
+
+	out := make([]*cpb.KeyValue, 0, len(attrs))
+	for _, kv := range attrs {
+		out = append(out, KeyValue(kv))
+	}
+	return out
+}
+
+// KeyValue transforms an attribute KeyValue into an OTLP key-value.
+func KeyValue(kv attribute.KeyValue) *cpb.KeyValue {
+	return &cpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
+}
+
+// Value transforms an attribute Value into an OTLP AnyValue.
+func Value(v attribute.Value) *cpb.AnyValue {
+	av := new(cpb.AnyValue)
+	switch v.Type() {
+	case attribute.BOOL:
+		av.Value = &cpb.AnyValue_BoolValue{
+			BoolValue: v.AsBool(),
+		}
+	case attribute.BOOLSLICE:
+		av.Value = &cpb.AnyValue_ArrayValue{
+			ArrayValue: &cpb.ArrayValue{
+				Values: boolSliceValues(v.AsBoolSlice()),
+			},
+		}
+	case attribute.INT64:
+		av.Value = &cpb.AnyValue_IntValue{
+			IntValue: v.AsInt64(),
+		}
+	case attribute.INT64SLICE:
+		av.Value = &cpb.AnyValue_ArrayValue{
+			ArrayValue: &cpb.ArrayValue{
+				Values: int64SliceValues(v.AsInt64Slice()),
+			},
+		}
+	case attribute.FLOAT64:
+		av.Value = &cpb.AnyValue_DoubleValue{
+			DoubleValue: v.AsFloat64(),
+		}
+	case attribute.FLOAT64SLICE:
+		av.Value = &cpb.AnyValue_ArrayValue{
+			ArrayValue: &cpb.ArrayValue{
+				Values: float64SliceValues(v.AsFloat64Slice()),
+			},
+		}
+	case attribute.STRING:
+		av.Value = &cpb.AnyValue_StringValue{
+			StringValue: v.AsString(),
+		}
+	case attribute.STRINGSLICE:
+		av.Value = &cpb.AnyValue_ArrayValue{
+			ArrayValue: &cpb.ArrayValue{
+				Values: stringSliceValues(v.AsStringSlice()),
+			},
+		}
+	default:
+		av.Value = &cpb.AnyValue_StringValue{
+			StringValue: "INVALID",
+		}
+	}
+	return av
+}
+
+func boolSliceValues(vals []bool) []*cpb.AnyValue {
+	converted := make([]*cpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &cpb.AnyValue{
+			Value: &cpb.AnyValue_BoolValue{
+				BoolValue: v,
+			},
+		}
+	}
+	return converted
+}
+
+func int64SliceValues(vals []int64) []*cpb.AnyValue {
+	converted := make([]*cpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &cpb.AnyValue{
+			Value: &cpb.AnyValue_IntValue{
+				IntValue: v,
+			},
+		}
+	}
+	return converted
+}
+
+func float64SliceValues(vals []float64) []*cpb.AnyValue {
+	converted := make([]*cpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &cpb.AnyValue{
+			Value: &cpb.AnyValue_DoubleValue{
+				DoubleValue: v,
+			},
+		}
+	}
+	return converted
+}
+
+func stringSliceValues(vals []string) []*cpb.AnyValue {
+	converted := make([]*cpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &cpb.AnyValue{
+			Value: &cpb.AnyValue_StringValue{
+				StringValue: v,
+			},
+		}
+	}
+	return converted
+}
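+
+// Illustrative sketch of the transformation (attribute keys and values here
+// are hypothetical):
+//
+//	kvs := KeyValues([]attribute.KeyValue{
+//		attribute.String("service.name", "demo"),
+//		attribute.Int("retry.count", 3),
+//	})
+//	// kvs[0].Value holds an AnyValue_StringValue ("demo");
+//	// kvs[1].Value holds an AnyValue_IntValue (3).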
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go
new file mode 100644
index 0000000000000000000000000000000000000000..0b5446e4d0772d2b9e380aaf56310aa45f3d5c84
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go
@@ -0,0 +1,114 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform"
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
+var (
+	errUnknownAggregation = errors.New("unknown aggregation")
+	errUnknownTemporality = errors.New("unknown temporality")
+)
+
+type errMetric struct {
+	m   *mpb.Metric
+	err error
+}
+
+func (e errMetric) Unwrap() error {
+	return e.err
+}
+
+func (e errMetric) Error() string {
+	format := "invalid metric (name: %q, description: %q, unit: %q): %s"
+	return fmt.Sprintf(format, e.m.Name, e.m.Description, e.m.Unit, e.err)
+}
+
+func (e errMetric) Is(target error) bool {
+	return errors.Is(e.err, target)
+}
+
+// multiErr is used by the data-type transform functions to wrap multiple
+// errors into a single return value. The error message will show all errors
+// as a list and scope them by the datatype name that is returning them.
+type multiErr struct {
+	datatype string
+	errs     []error
+}
+
+// errOrNil returns nil if e contains no errors, otherwise it returns e.
+func (e *multiErr) errOrNil() error {
+	if len(e.errs) == 0 {
+		return nil
+	}
+	return e
+}
+
+// append adds err to e. If err is a multiErr, its errs are flattened into e.
+func (e *multiErr) append(err error) {
+	// Do not use errors.As here; the chain should only be flattened one
+	// layer. If there is a *multiErr several steps down the chain, all the
+	// errors above it would be discarded if errors.As were used instead.
+	switch other := err.(type) {
+	case *multiErr:
+		// Flatten err errors into e.
+		e.errs = append(e.errs, other.errs...)
+	default:
+		e.errs = append(e.errs, err)
+	}
+}
+
+func (e *multiErr) Error() string {
+	es := make([]string, len(e.errs))
+	for i, err := range e.errs {
+		es[i] = fmt.Sprintf("* %s", err)
+	}
+
+	format := "%d errors occurred transforming %s:\n\t%s"
+	return fmt.Sprintf(format, len(es), e.datatype, strings.Join(es, "\n\t"))
+}
+
+func (e *multiErr) Unwrap() error {
+	switch len(e.errs) {
+	case 0:
+		return nil
+	case 1:
+		return e.errs[0]
+	}
+
+	// Return a multiErr without the leading error.
+	cp := &multiErr{
+		datatype: e.datatype,
+		errs:     make([]error, len(e.errs)-1),
+	}
+	copy(cp.errs, e.errs[1:])
+	return cp
+}
+
+func (e *multiErr) Is(target error) bool {
+	if len(e.errs) == 0 {
+		return false
+	}
+	// Check if the first error is target.
+	return errors.Is(e.errs[0], target)
+}
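+
+// Behavior sketch (illustrative): appending a *multiErr flattens its errors
+// one layer, and Unwrap exposes the rest of the chain to errors.Is:
+//
+//	e := &multiErr{datatype: "Metrics"}
+//	e.append(errUnknownTemporality)
+//	e.append(&multiErr{errs: []error{errUnknownAggregation}})
+//	// e.errs now has two entries, and
+//	// errors.Is(e.errOrNil(), errUnknownAggregation) reports true via Unwrap.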
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go
new file mode 100644
index 0000000000000000000000000000000000000000..985fcfc6437d0d2724690f11794bad5cb6277846
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go
@@ -0,0 +1,292 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package transform provides transformation functionality from the
+// sdk/metric/metricdata data-types into OTLP data-types.
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform"
+
+import (
+	"fmt"
+	"time"
+
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+	cpb "go.opentelemetry.io/proto/otlp/common/v1"
+	mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+	rpb "go.opentelemetry.io/proto/otlp/resource/v1"
+)
+
+// ResourceMetrics returns an OTLP ResourceMetrics generated from rm. If rm
+// contains invalid ScopeMetrics, an error will be returned along with an OTLP
+// ResourceMetrics that contains partial OTLP ScopeMetrics.
+func ResourceMetrics(rm *metricdata.ResourceMetrics) (*mpb.ResourceMetrics, error) {
+	sms, err := ScopeMetrics(rm.ScopeMetrics)
+	return &mpb.ResourceMetrics{
+		Resource: &rpb.Resource{
+			Attributes: AttrIter(rm.Resource.Iter()),
+		},
+		ScopeMetrics: sms,
+		SchemaUrl:    rm.Resource.SchemaURL(),
+	}, err
+}
+
+// ScopeMetrics returns a slice of OTLP ScopeMetrics generated from sms. If
+// sms contains invalid metric values, an error will be returned along with a
+// slice that contains partial OTLP ScopeMetrics.
+func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) {
+	errs := &multiErr{datatype: "ScopeMetrics"}
+	out := make([]*mpb.ScopeMetrics, 0, len(sms))
+	for _, sm := range sms {
+		ms, err := Metrics(sm.Metrics)
+		if err != nil {
+			errs.append(err)
+		}
+
+		out = append(out, &mpb.ScopeMetrics{
+			Scope: &cpb.InstrumentationScope{
+				Name:    sm.Scope.Name,
+				Version: sm.Scope.Version,
+			},
+			Metrics:   ms,
+			SchemaUrl: sm.Scope.SchemaURL,
+		})
+	}
+	return out, errs.errOrNil()
+}
+
+// Metrics returns a slice of OTLP Metric generated from ms. If ms contains
+// invalid metric values, an error will be returned along with a slice that
+// contains partial OTLP Metrics.
+func Metrics(ms []metricdata.Metrics) ([]*mpb.Metric, error) {
+	errs := &multiErr{datatype: "Metrics"}
+	out := make([]*mpb.Metric, 0, len(ms))
+	for _, m := range ms {
+		o, err := metric(m)
+		if err != nil {
+			// Do not include invalid data. Drop the metric, report the error.
+			errs.append(errMetric{m: o, err: err})
+			continue
+		}
+		out = append(out, o)
+	}
+	return out, errs.errOrNil()
+}
+
+func metric(m metricdata.Metrics) (*mpb.Metric, error) {
+	var err error
+	out := &mpb.Metric{
+		Name:        m.Name,
+		Description: m.Description,
+		Unit:        string(m.Unit),
+	}
+	switch a := m.Data.(type) {
+	case metricdata.Gauge[int64]:
+		out.Data = Gauge[int64](a)
+	case metricdata.Gauge[float64]:
+		out.Data = Gauge[float64](a)
+	case metricdata.Sum[int64]:
+		out.Data, err = Sum[int64](a)
+	case metricdata.Sum[float64]:
+		out.Data, err = Sum[float64](a)
+	case metricdata.Histogram[int64]:
+		out.Data, err = Histogram(a)
+	case metricdata.Histogram[float64]:
+		out.Data, err = Histogram(a)
+	case metricdata.ExponentialHistogram[int64]:
+		out.Data, err = ExponentialHistogram(a)
+	case metricdata.ExponentialHistogram[float64]:
+		out.Data, err = ExponentialHistogram(a)
+	default:
+		return out, fmt.Errorf("%w: %T", errUnknownAggregation, a)
+	}
+	return out, err
+}
+
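+// Illustrative sketch of the mapping (metric name and value are
+// hypothetical):
+//
+//	m := metricdata.Metrics{
+//		Name: "queue.length",
+//		Data: metricdata.Gauge[int64]{
+//			DataPoints: []metricdata.DataPoint[int64]{{Value: 7}},
+//		},
+//	}
+//	out, _ := metric(m)
+//	// out.GetGauge() holds one NumberDataPoint with AsInt: 7.
+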
+// Gauge returns an OTLP Metric_Gauge generated from g.
+func Gauge[N int64 | float64](g metricdata.Gauge[N]) *mpb.Metric_Gauge {
+	return &mpb.Metric_Gauge{
+		Gauge: &mpb.Gauge{
+			DataPoints: DataPoints(g.DataPoints),
+		},
+	}
+}
+
+// Sum returns an OTLP Metric_Sum generated from s. An error is returned
+// if the temporality of s is unknown.
+func Sum[N int64 | float64](s metricdata.Sum[N]) (*mpb.Metric_Sum, error) {
+	t, err := Temporality(s.Temporality)
+	if err != nil {
+		return nil, err
+	}
+	return &mpb.Metric_Sum{
+		Sum: &mpb.Sum{
+			AggregationTemporality: t,
+			IsMonotonic:            s.IsMonotonic,
+			DataPoints:             DataPoints(s.DataPoints),
+		},
+	}, nil
+}
+
+// DataPoints returns a slice of OTLP NumberDataPoint generated from dPts.
+func DataPoints[N int64 | float64](dPts []metricdata.DataPoint[N]) []*mpb.NumberDataPoint {
+	out := make([]*mpb.NumberDataPoint, 0, len(dPts))
+	for _, dPt := range dPts {
+		ndp := &mpb.NumberDataPoint{
+			Attributes:        AttrIter(dPt.Attributes.Iter()),
+			StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+			TimeUnixNano:      timeUnixNano(dPt.Time),
+		}
+		switch v := any(dPt.Value).(type) {
+		case int64:
+			ndp.Value = &mpb.NumberDataPoint_AsInt{
+				AsInt: v,
+			}
+		case float64:
+			ndp.Value = &mpb.NumberDataPoint_AsDouble{
+				AsDouble: v,
+			}
+		}
+		out = append(out, ndp)
+	}
+	return out
+}
+
+// Histogram returns an OTLP Metric_Histogram generated from h. An error is
+// returned if the temporality of h is unknown.
+func Histogram[N int64 | float64](h metricdata.Histogram[N]) (*mpb.Metric_Histogram, error) {
+	t, err := Temporality(h.Temporality)
+	if err != nil {
+		return nil, err
+	}
+	return &mpb.Metric_Histogram{
+		Histogram: &mpb.Histogram{
+			AggregationTemporality: t,
+			DataPoints:             HistogramDataPoints(h.DataPoints),
+		},
+	}, nil
+}
+
+// HistogramDataPoints returns a slice of OTLP HistogramDataPoint generated
+// from dPts.
+func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint[N]) []*mpb.HistogramDataPoint {
+	out := make([]*mpb.HistogramDataPoint, 0, len(dPts))
+	for _, dPt := range dPts {
+		sum := float64(dPt.Sum)
+		hdp := &mpb.HistogramDataPoint{
+			Attributes:        AttrIter(dPt.Attributes.Iter()),
+			StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+			TimeUnixNano:      timeUnixNano(dPt.Time),
+			Count:             dPt.Count,
+			Sum:               &sum,
+			BucketCounts:      dPt.BucketCounts,
+			ExplicitBounds:    dPt.Bounds,
+		}
+		if v, ok := dPt.Min.Value(); ok {
+			vF64 := float64(v)
+			hdp.Min = &vF64
+		}
+		if v, ok := dPt.Max.Value(); ok {
+			vF64 := float64(v)
+			hdp.Max = &vF64
+		}
+		out = append(out, hdp)
+	}
+	return out
+}
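+
+// Min and Max are optional extrema; the guard above only sets them on the
+// OTLP point when a value is present. A sketch (hypothetical value):
+//
+//	min := metricdata.NewExtrema[int64](1)
+//	if v, ok := min.Value(); ok {
+//		// v == 1, so hdp.Min would be populated.
+//	}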
+
+// ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is
+// returned if the temporality of h is unknown.
+func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) {
+	t, err := Temporality(h.Temporality)
+	if err != nil {
+		return nil, err
+	}
+	return &mpb.Metric_ExponentialHistogram{
+		ExponentialHistogram: &mpb.ExponentialHistogram{
+			AggregationTemporality: t,
+			DataPoints:             ExponentialHistogramDataPoints(h.DataPoints),
+		},
+	}, nil
+}
+
+// ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated
+// from dPts.
+func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint {
+	out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts))
+	for _, dPt := range dPts {
+		sum := float64(dPt.Sum)
+		ehdp := &mpb.ExponentialHistogramDataPoint{
+			Attributes:        AttrIter(dPt.Attributes.Iter()),
+			StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+			TimeUnixNano:      timeUnixNano(dPt.Time),
+			Count:             dPt.Count,
+			Sum:               &sum,
+			Scale:             dPt.Scale,
+			ZeroCount:         dPt.ZeroCount,
+
+			Positive: ExponentialHistogramDataPointBuckets(dPt.PositiveBucket),
+			Negative: ExponentialHistogramDataPointBuckets(dPt.NegativeBucket),
+		}
+		if v, ok := dPt.Min.Value(); ok {
+			vF64 := float64(v)
+			ehdp.Min = &vF64
+		}
+		if v, ok := dPt.Max.Value(); ok {
+			vF64 := float64(v)
+			ehdp.Max = &vF64
+		}
+		out = append(out, ehdp)
+	}
+	return out
+}
+
+// ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated
+// from bucket.
+func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets {
+	return &mpb.ExponentialHistogramDataPoint_Buckets{
+		Offset:       bucket.Offset,
+		BucketCounts: bucket.Counts,
+	}
+}
+
+// Temporality returns an OTLP AggregationTemporality generated from t. If t
+// is unknown, an error is returned along with the invalid
+// AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED.
+func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) {
+	switch t {
+	case metricdata.DeltaTemporality:
+		return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, nil
+	case metricdata.CumulativeTemporality:
+		return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, nil
+	default:
+		err := fmt.Errorf("%w: %s", errUnknownTemporality, t)
+		return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED, err
+	}
+}
+
+// timeUnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC as uint64. The result is undefined if the Unix
+// time in nanoseconds cannot be represented by an int64 (a date before the
+// year 1678 or after 2262). timeUnixNano on the zero Time returns 0. The
+// result does not depend on the location associated with t.
+func timeUnixNano(t time.Time) uint64 {
+	if t.IsZero() {
+		return 0
+	}
+	return uint64(t.UnixNano())
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go
new file mode 100644
index 0000000000000000000000000000000000000000..59d7c1c2cb9bd951bae47d4ec2dc9e2a0c8b90c2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
+
+// Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf metrics exporter in use.
+func Version() string {
+	return "0.44.0"
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go
new file mode 100644
index 0000000000000000000000000000000000000000..dbb40cf5820f6733b093054edd5db68f14741027
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go
@@ -0,0 +1,54 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+
+import (
+	"context"
+
+	tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
+)
+
+// Client manages connections to the collector, transforms data into the wire
+// format, and transmits that data to the collector.
+type Client interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Start should establish connection(s) to endpoint(s). It is
+	// called just once by the exporter, so the implementation
+	// does not need to worry about idempotence and locking.
+	Start(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Stop should close the connections. The function is called
+	// only once by the exporter, so the implementation does not
+	// need to worry about idempotence, but it may be called
+	// concurrently with UploadTraces, so proper
+	// locking is required. The function serves as a
+	// synchronization point - after the function returns, the
+	// process of closing connections is assumed to be finished.
+	Stop(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// UploadTraces should transform the passed traces to the wire
+	// format and send it to the collector. May be called
+	// concurrently.
+	UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
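+
+// A minimal conforming implementation, sketched for illustration (noopClient
+// is hypothetical and not part of this package):
+//
+//	type noopClient struct{}
+//
+//	func (noopClient) Start(context.Context) error { return nil }
+//	func (noopClient) Stop(context.Context) error  { return nil }
+//	func (noopClient) UploadTraces(context.Context, []*tracepb.ResourceSpans) error {
+//		return nil
+//	}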
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..9e642235ade4bd4734a4cd69e5b89e08b3a29e3e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go
@@ -0,0 +1,21 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package otlptrace contains abstractions for OTLP span exporters.
+See the official OTLP span exporter implementations:
+  - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc],
+  - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp].
+*/
+package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
new file mode 100644
index 0000000000000000000000000000000000000000..cb41c7d58f2f870c55785d63473a49016a17ab3e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
@@ -0,0 +1,116 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+	tracesdk "go.opentelemetry.io/otel/sdk/trace"
+)
+
+var errAlreadyStarted = errors.New("already started")
+
+// Exporter exports trace data in the OTLP wire format.
+type Exporter struct {
+	client Client
+
+	mu      sync.RWMutex
+	started bool
+
+	startOnce sync.Once
+	stopOnce  sync.Once
+}
+
+// ExportSpans exports a batch of spans.
+func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan) error {
+	protoSpans := tracetransform.Spans(ss)
+	if len(protoSpans) == 0 {
+		return nil
+	}
+
+	err := e.client.UploadTraces(ctx, protoSpans)
+	if err != nil {
+		return fmt.Errorf("traces export: %w", err)
+	}
+	return nil
+}
+
+// Start establishes a connection to the receiving endpoint.
+func (e *Exporter) Start(ctx context.Context) error {
+	err := errAlreadyStarted
+	e.startOnce.Do(func() {
+		e.mu.Lock()
+		e.started = true
+		e.mu.Unlock()
+		err = e.client.Start(ctx)
+	})
+
+	return err
+}
+
+// Shutdown flushes all exports and closes all connections to the receiving endpoint.
+func (e *Exporter) Shutdown(ctx context.Context) error {
+	e.mu.RLock()
+	started := e.started
+	e.mu.RUnlock()
+
+	if !started {
+		return nil
+	}
+
+	var err error
+
+	e.stopOnce.Do(func() {
+		err = e.client.Stop(ctx)
+		e.mu.Lock()
+		e.started = false
+		e.mu.Unlock()
+	})
+
+	return err
+}
+
+var _ tracesdk.SpanExporter = (*Exporter)(nil)
+
+// New constructs a new Exporter and starts it.
+func New(ctx context.Context, client Client) (*Exporter, error) {
+	exp := NewUnstarted(client)
+	if err := exp.Start(ctx); err != nil {
+		return nil, err
+	}
+	return exp, nil
+}
+
+// NewUnstarted constructs a new Exporter and does not start it.
+func NewUnstarted(client Client) *Exporter {
+	return &Exporter{
+		client: client,
+	}
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Exporter.
+func (e *Exporter) MarshalLog() interface{} {
+	return struct {
+		Type   string
+		Client Client
+	}{
+		Type:   "otlptrace",
+		Client: e.client,
+	}
+}
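+
+// Typical wiring sketch (illustrative; ctx and client are hypothetical
+// stand-ins for a caller's context and a concrete Client):
+//
+//	exp, err := New(ctx, client)
+//	if err != nil {
+//		// handle the startup failure
+//	}
+//	defer func() { _ = exp.Shutdown(ctx) }()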
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
new file mode 100644
index 0000000000000000000000000000000000000000..ec74f1aad7581db68ffed85afa69bfe71dc79ade
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
@@ -0,0 +1,158 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+
+import (
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/resource"
+	commonpb "go.opentelemetry.io/proto/otlp/common/v1"
+)
+
+// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
+func KeyValues(attrs []attribute.KeyValue) []*commonpb.KeyValue {
+	if len(attrs) == 0 {
+		return nil
+	}
+
+	out := make([]*commonpb.KeyValue, 0, len(attrs))
+	for _, kv := range attrs {
+		out = append(out, KeyValue(kv))
+	}
+	return out
+}
+
+// Iterator transforms an attribute iterator into OTLP key-values.
+func Iterator(iter attribute.Iterator) []*commonpb.KeyValue {
+	l := iter.Len()
+	if l == 0 {
+		return nil
+	}
+
+	out := make([]*commonpb.KeyValue, 0, l)
+	for iter.Next() {
+		out = append(out, KeyValue(iter.Attribute()))
+	}
+	return out
+}
+
+// ResourceAttributes transforms a Resource into OTLP key-values.
+func ResourceAttributes(res *resource.Resource) []*commonpb.KeyValue {
+	return Iterator(res.Iter())
+}
+
+// KeyValue transforms an attribute KeyValue into an OTLP key-value.
+func KeyValue(kv attribute.KeyValue) *commonpb.KeyValue {
+	return &commonpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
+}
+
+// Value transforms an attribute Value into an OTLP AnyValue.
+func Value(v attribute.Value) *commonpb.AnyValue {
+	av := new(commonpb.AnyValue)
+	switch v.Type() {
+	case attribute.BOOL:
+		av.Value = &commonpb.AnyValue_BoolValue{
+			BoolValue: v.AsBool(),
+		}
+	case attribute.BOOLSLICE:
+		av.Value = &commonpb.AnyValue_ArrayValue{
+			ArrayValue: &commonpb.ArrayValue{
+				Values: boolSliceValues(v.AsBoolSlice()),
+			},
+		}
+	case attribute.INT64:
+		av.Value = &commonpb.AnyValue_IntValue{
+			IntValue: v.AsInt64(),
+		}
+	case attribute.INT64SLICE:
+		av.Value = &commonpb.AnyValue_ArrayValue{
+			ArrayValue: &commonpb.ArrayValue{
+				Values: int64SliceValues(v.AsInt64Slice()),
+			},
+		}
+	case attribute.FLOAT64:
+		av.Value = &commonpb.AnyValue_DoubleValue{
+			DoubleValue: v.AsFloat64(),
+		}
+	case attribute.FLOAT64SLICE:
+		av.Value = &commonpb.AnyValue_ArrayValue{
+			ArrayValue: &commonpb.ArrayValue{
+				Values: float64SliceValues(v.AsFloat64Slice()),
+			},
+		}
+	case attribute.STRING:
+		av.Value = &commonpb.AnyValue_StringValue{
+			StringValue: v.AsString(),
+		}
+	case attribute.STRINGSLICE:
+		av.Value = &commonpb.AnyValue_ArrayValue{
+			ArrayValue: &commonpb.ArrayValue{
+				Values: stringSliceValues(v.AsStringSlice()),
+			},
+		}
+	default:
+		av.Value = &commonpb.AnyValue_StringValue{
+			StringValue: "INVALID",
+		}
+	}
+	return av
+}
+
+func boolSliceValues(vals []bool) []*commonpb.AnyValue {
+	converted := make([]*commonpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &commonpb.AnyValue{
+			Value: &commonpb.AnyValue_BoolValue{
+				BoolValue: v,
+			},
+		}
+	}
+	return converted
+}
+
+func int64SliceValues(vals []int64) []*commonpb.AnyValue {
+	converted := make([]*commonpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &commonpb.AnyValue{
+			Value: &commonpb.AnyValue_IntValue{
+				IntValue: v,
+			},
+		}
+	}
+	return converted
+}
+
+func float64SliceValues(vals []float64) []*commonpb.AnyValue {
+	converted := make([]*commonpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &commonpb.AnyValue{
+			Value: &commonpb.AnyValue_DoubleValue{
+				DoubleValue: v,
+			},
+		}
+	}
+	return converted
+}
+
+func stringSliceValues(vals []string) []*commonpb.AnyValue {
+	converted := make([]*commonpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &commonpb.AnyValue{
+			Value: &commonpb.AnyValue_StringValue{
+				StringValue: v,
+			},
+		}
+	}
+	return converted
+}
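+
+// Illustrative sketch: slice-typed attributes become OTLP array values
+// (hypothetical key and values):
+//
+//	v := Value(attribute.StringSlice("tags", []string{"a", "b"}).Value)
+//	// v.GetArrayValue().Values holds two AnyValue_StringValue entries.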
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
new file mode 100644
index 0000000000000000000000000000000000000000..7aaec38d22ae9a8e3c66a1f026c9a6f0ed23f88c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
@@ -0,0 +1,30 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+
+import (
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	commonpb "go.opentelemetry.io/proto/otlp/common/v1"
+)
+
+func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationScope {
+	if il == (instrumentation.Scope{}) {
+		return nil
+	}
+	return &commonpb.InstrumentationScope{
+		Name:    il.Name,
+		Version: il.Version,
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go
new file mode 100644
index 0000000000000000000000000000000000000000..05a1f78adbce4cfe18b53d96e118d26193f08db5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+
+import (
+	"go.opentelemetry.io/otel/sdk/resource"
+	resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
+)
+
+// Resource transforms a Resource into an OTLP Resource.
+func Resource(r *resource.Resource) *resourcepb.Resource {
+	if r == nil {
+		return nil
+	}
+	return &resourcepb.Resource{Attributes: ResourceAttributes(r)}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
new file mode 100644
index 0000000000000000000000000000000000000000..b83cbd7247807487e978d21219ba841101d6f97b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
@@ -0,0 +1,205 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+
+import (
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	tracesdk "go.opentelemetry.io/otel/sdk/trace"
+	"go.opentelemetry.io/otel/trace"
+	tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
+)
+
+// Spans transforms a slice of OpenTelemetry spans into a slice of OTLP
+// ResourceSpans.
+func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans {
+	if len(sdl) == 0 {
+		return nil
+	}
+
+	rsm := make(map[attribute.Distinct]*tracepb.ResourceSpans)
+
+	type key struct {
+		r  attribute.Distinct
+		is instrumentation.Scope
+	}
+	ssm := make(map[key]*tracepb.ScopeSpans)
+
+	var resources int
+	for _, sd := range sdl {
+		if sd == nil {
+			continue
+		}
+
+		rKey := sd.Resource().Equivalent()
+		k := key{
+			r:  rKey,
+			is: sd.InstrumentationScope(),
+		}
+		scopeSpan, iOk := ssm[k]
+		if !iOk {
+			// Either the resource or the instrumentation scope was unknown.
+			scopeSpan = &tracepb.ScopeSpans{
+				Scope:     InstrumentationScope(sd.InstrumentationScope()),
+				Spans:     []*tracepb.Span{},
+				SchemaUrl: sd.InstrumentationScope().SchemaURL,
+			}
+		}
+		scopeSpan.Spans = append(scopeSpan.Spans, span(sd))
+		ssm[k] = scopeSpan
+
+		rs, rOk := rsm[rKey]
+		if !rOk {
+			resources++
+			// The resource was unknown.
+			rs = &tracepb.ResourceSpans{
+				Resource:   Resource(sd.Resource()),
+				ScopeSpans: []*tracepb.ScopeSpans{scopeSpan},
+				SchemaUrl:  sd.Resource().SchemaURL(),
+			}
+			rsm[rKey] = rs
+			continue
+		}
+
+		// The resource has been seen before. Check whether the
+		// instrumentation scope lookup was unknown; if so, the new
+		// ScopeSpans needs to be added to the ResourceSpans. Otherwise, the
+		// instrumentation scope has already been seen and the span appended
+		// above is already included via the shared ScopeSpans reference.
+		if !iOk {
+			rs.ScopeSpans = append(rs.ScopeSpans, scopeSpan)
+		}
+	}
+
+	// Transform the categorized map into a slice
+	rss := make([]*tracepb.ResourceSpans, 0, resources)
+	for _, rs := range rsm {
+		rss = append(rss, rs)
+	}
+	return rss
+}
+
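+// Grouping sketch (illustrative; spanA and spanB are hypothetical
+// ReadOnlySpans sharing one resource and one instrumentation scope):
+//
+//	rss := Spans([]tracesdk.ReadOnlySpan{spanA, spanB})
+//	// len(rss) == 1, len(rss[0].ScopeSpans) == 1,
+//	// len(rss[0].ScopeSpans[0].Spans) == 2
+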
+// span transforms a Span into an OTLP span.
+func span(sd tracesdk.ReadOnlySpan) *tracepb.Span {
+	if sd == nil {
+		return nil
+	}
+
+	tid := sd.SpanContext().TraceID()
+	sid := sd.SpanContext().SpanID()
+
+	s := &tracepb.Span{
+		TraceId:                tid[:],
+		SpanId:                 sid[:],
+		TraceState:             sd.SpanContext().TraceState().String(),
+		Status:                 status(sd.Status().Code, sd.Status().Description),
+		StartTimeUnixNano:      uint64(sd.StartTime().UnixNano()),
+		EndTimeUnixNano:        uint64(sd.EndTime().UnixNano()),
+		Links:                  links(sd.Links()),
+		Kind:                   spanKind(sd.SpanKind()),
+		Name:                   sd.Name(),
+		Attributes:             KeyValues(sd.Attributes()),
+		Events:                 spanEvents(sd.Events()),
+		DroppedAttributesCount: uint32(sd.DroppedAttributes()),
+		DroppedEventsCount:     uint32(sd.DroppedEvents()),
+		DroppedLinksCount:      uint32(sd.DroppedLinks()),
+	}
+
+	if psid := sd.Parent().SpanID(); psid.IsValid() {
+		s.ParentSpanId = psid[:]
+	}
+
+	return s
+}
+
+// status transforms a span status code and message into an OTLP span status.
+func status(status codes.Code, message string) *tracepb.Status {
+	var c tracepb.Status_StatusCode
+	switch status {
+	case codes.Ok:
+		c = tracepb.Status_STATUS_CODE_OK
+	case codes.Error:
+		c = tracepb.Status_STATUS_CODE_ERROR
+	default:
+		c = tracepb.Status_STATUS_CODE_UNSET
+	}
+	return &tracepb.Status{
+		Code:    c,
+		Message: message,
+	}
+}
+
+// links transforms span Links to OTLP span links.
+func links(links []tracesdk.Link) []*tracepb.Span_Link {
+	if len(links) == 0 {
+		return nil
+	}
+
+	sl := make([]*tracepb.Span_Link, 0, len(links))
+	for _, otLink := range links {
+		// This redefinition is necessary to prevent otLink.*ID[:] copies
+		// being reused -- in short we need a new otLink per iteration.
+		otLink := otLink
+
+		tid := otLink.SpanContext.TraceID()
+		sid := otLink.SpanContext.SpanID()
+
+		sl = append(sl, &tracepb.Span_Link{
+			TraceId:                tid[:],
+			SpanId:                 sid[:],
+			Attributes:             KeyValues(otLink.Attributes),
+			DroppedAttributesCount: uint32(otLink.DroppedAttributeCount),
+		})
+	}
+	return sl
+}
+
+// spanEvents transforms span Events into OTLP span events.
+func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event {
+	if len(es) == 0 {
+		return nil
+	}
+
+	events := make([]*tracepb.Span_Event, len(es))
+	// Transform message events
+	for i := 0; i < len(es); i++ {
+		events[i] = &tracepb.Span_Event{
+			Name:                   es[i].Name,
+			TimeUnixNano:           uint64(es[i].Time.UnixNano()),
+			Attributes:             KeyValues(es[i].Attributes),
+			DroppedAttributesCount: uint32(es[i].DroppedAttributeCount),
+		}
+	}
+	return events
+}
+
+// spanKind transforms a SpanKind to an OTLP span kind.
+func spanKind(kind trace.SpanKind) tracepb.Span_SpanKind {
+	switch kind {
+	case trace.SpanKindInternal:
+		return tracepb.Span_SPAN_KIND_INTERNAL
+	case trace.SpanKindClient:
+		return tracepb.Span_SPAN_KIND_CLIENT
+	case trace.SpanKindServer:
+		return tracepb.Span_SPAN_KIND_SERVER
+	case trace.SpanKindProducer:
+		return tracepb.Span_SPAN_KIND_PRODUCER
+	case trace.SpanKindConsumer:
+		return tracepb.Span_SPAN_KIND_CONSUMER
+	default:
+		return tracepb.Span_SPAN_KIND_UNSPECIFIED
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
new file mode 100644
index 0000000000000000000000000000000000000000..b4cc21d7a3c60e2f411f214730abe8628614556c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
@@ -0,0 +1,306 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"time"
+
+	"google.golang.org/genproto/googleapis/rpc/errdetails"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
+	coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
+	tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
+)
+
+type client struct {
+	endpoint      string
+	dialOpts      []grpc.DialOption
+	metadata      metadata.MD
+	exportTimeout time.Duration
+	requestFunc   retry.RequestFunc
+
+	// stopCtx is used as a parent context for all exports. Therefore, when it
+	// is canceled with the stopFunc all exports are canceled.
+	stopCtx context.Context
+	// stopFunc cancels stopCtx, stopping any active exports.
+	stopFunc context.CancelFunc
+
+	// ourConn keeps track of where conn was created: true if created here on
+	// Start, or false if passed with an option. This is important on Shutdown
+	// as the conn should only be closed if created here on start. Otherwise,
+	// it is up to the processes that passed the conn to close it.
+	ourConn bool
+	conn    *grpc.ClientConn
+	tscMu   sync.RWMutex
+	tsc     coltracepb.TraceServiceClient
+}
+
+// Compile-time check that *client implements otlptrace.Client.
+var _ otlptrace.Client = (*client)(nil)
+
+// NewClient creates a new gRPC trace client.
+func NewClient(opts ...Option) otlptrace.Client {
+	return newClient(opts...)
+}
+
+func newClient(opts ...Option) *client {
+	cfg := otlpconfig.NewGRPCConfig(asGRPCOptions(opts)...)
+
+	ctx, cancel := context.WithCancel(context.Background())
+
+	c := &client{
+		endpoint:      cfg.Traces.Endpoint,
+		exportTimeout: cfg.Traces.Timeout,
+		requestFunc:   cfg.RetryConfig.RequestFunc(retryable),
+		dialOpts:      cfg.DialOptions,
+		stopCtx:       ctx,
+		stopFunc:      cancel,
+		conn:          cfg.GRPCConn,
+	}
+
+	if len(cfg.Traces.Headers) > 0 {
+		c.metadata = metadata.New(cfg.Traces.Headers)
+	}
+
+	return c
+}
+
+// Start establishes a gRPC connection to the collector.
+func (c *client) Start(ctx context.Context) error {
+	if c.conn == nil {
+		// If the caller did not provide a ClientConn when the client was
+		// created, create one using the configuration they did provide.
+		conn, err := grpc.DialContext(ctx, c.endpoint, c.dialOpts...)
+		if err != nil {
+			return err
+		}
+		// Keep track that we own the lifecycle of this conn and need to close
+		// it on Shutdown.
+		c.ourConn = true
+		c.conn = conn
+	}
+
+	// The otlptrace.Client interface states this method is called just once,
+	// so no need to check if already started.
+	c.tscMu.Lock()
+	c.tsc = coltracepb.NewTraceServiceClient(c.conn)
+	c.tscMu.Unlock()
+
+	return nil
+}
+
+var errAlreadyStopped = errors.New("the client is already stopped")
+
+// Stop shuts down the client.
+//
+// Any active connections to a remote endpoint are closed if they were created
+// by the client. Any gRPC connection passed during creation using
+// WithGRPCConn will not be closed. It is the caller's responsibility to
+// handle cleanup of that resource.
+//
+// This method synchronizes with the UploadTraces method of the client. It
+// will wait for any active calls to that method to complete unimpeded, or it
+// will cancel any active calls if ctx expires. If ctx expires, the context
+// error will be forwarded as the returned error. All client held resources
+// will still be released in this situation.
+//
+// If the client has already stopped, an error will be returned describing
+// this.
+func (c *client) Stop(ctx context.Context) error {
+	// Make sure to return context error if the context is done when calling this method.
+	err := ctx.Err()
+
+	// Acquire the c.tscMu lock within the ctx lifetime.
+	acquired := make(chan struct{})
+	go func() {
+		c.tscMu.Lock()
+		close(acquired)
+	}()
+
+	select {
+	case <-ctx.Done():
+		// The Stop timeout is reached. Kill any remaining exports to force
+		// the lock to clear, and save the timeout error to return to
+		// signal the shutdown timed out before cleanly stopping.
+		c.stopFunc()
+		err = ctx.Err()
+
+		// To ensure the client is not left in a dirty state c.tsc needs to be
+		// set to nil. To avoid the race condition when doing this, ensure
+		// that all the exports are killed (initiated by c.stopFunc).
+		<-acquired
+	case <-acquired:
+	}
+	// Hold the tscMu lock for the rest of the function to ensure no new
+	// exports are started.
+	defer c.tscMu.Unlock()
+
+	// The otlptrace.Client interface states this method is called only
+	// once, but there is no guarantee it is called after Start. Ensure the
+// client is started before doing anything and let the caller know if they
+	// made a mistake.
+	if c.tsc == nil {
+		return errAlreadyStopped
+	}
+
+	// Clear c.tsc to signal the client is stopped.
+	c.tsc = nil
+
+	if c.ourConn {
+		closeErr := c.conn.Close()
+		// A context timeout error takes precedence over this error.
+		if err == nil && closeErr != nil {
+			err = closeErr
+		}
+	}
+	return err
+}
+
+var errShutdown = errors.New("the client is shutdown")
+
+// UploadTraces sends a batch of spans.
+//
+// Retryable errors from the server will be handled according to any
+// RetryConfig the client was created with.
+func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
+	// Hold a read lock to ensure a shutdown initiated after this starts does
+	// not abandon the export. This read lock acquire has lower priority than a
+	// write lock acquire (i.e. Stop), meaning if the client is shutting down
+	// this will come after the shutdown.
+	c.tscMu.RLock()
+	defer c.tscMu.RUnlock()
+
+	if c.tsc == nil {
+		return errShutdown
+	}
+
+	ctx, cancel := c.exportContext(ctx)
+	defer cancel()
+
+	return c.requestFunc(ctx, func(iCtx context.Context) error {
+		resp, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{
+			ResourceSpans: protoSpans,
+		})
+		if resp != nil && resp.PartialSuccess != nil {
+			msg := resp.PartialSuccess.GetErrorMessage()
+			n := resp.PartialSuccess.GetRejectedSpans()
+			if n != 0 || msg != "" {
+				err := internal.TracePartialSuccessError(n, msg)
+				otel.Handle(err)
+			}
+		}
+		// nil is converted to OK.
+		if status.Code(err) == codes.OK {
+			// Success.
+			return nil
+		}
+		return err
+	})
+}
+
+// exportContext returns a copy of parent with an appropriate deadline and
+// cancellation function.
+//
+// It is the caller's responsibility to cancel the returned context once its
+// use is complete, via the parent or directly with the returned CancelFunc, to
+// ensure all resources are correctly released.
+func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {
+	var (
+		ctx    context.Context
+		cancel context.CancelFunc
+	)
+
+	if c.exportTimeout > 0 {
+		ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
+	} else {
+		ctx, cancel = context.WithCancel(parent)
+	}
+
+	if c.metadata.Len() > 0 {
+		ctx = metadata.NewOutgoingContext(ctx, c.metadata)
+	}
+
+	// Unify the client stopCtx with the parent.
+	go func() {
+		select {
+		case <-ctx.Done():
+		case <-c.stopCtx.Done():
+			// Cancel the export as the shutdown has timed out.
+			cancel()
+		}
+	}()
+
+	return ctx, cancel
+}
+
+// retryable returns whether err identifies a request that can be retried and
+// a duration to wait before retrying if an explicit throttle time is included
+// in err.
+func retryable(err error) (bool, time.Duration) {
+	s := status.Convert(err)
+	return retryableGRPCStatus(s)
+}
+
+func retryableGRPCStatus(s *status.Status) (bool, time.Duration) {
+	switch s.Code() {
+	case codes.Canceled,
+		codes.DeadlineExceeded,
+		codes.Aborted,
+		codes.OutOfRange,
+		codes.Unavailable,
+		codes.DataLoss:
+		// Additionally handle RetryInfo.
+		_, d := throttleDelay(s)
+		return true, d
+	case codes.ResourceExhausted:
+		// Retry only if the server signals that the recovery from resource exhaustion is possible.
+		return throttleDelay(s)
+	}
+
+	// Not a retry-able error.
+	return false, 0
+}
+
+// throttleDelay returns whether the status includes RetryInfo and, if so,
+// the explicit throttle delay to wait before retrying.
+func throttleDelay(s *status.Status) (bool, time.Duration) {
+	for _, detail := range s.Details() {
+		if t, ok := detail.(*errdetails.RetryInfo); ok {
+			return true, t.RetryDelay.AsDuration()
+		}
+	}
+	return false, 0
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Client.
+func (c *client) MarshalLog() interface{} {
+	return struct {
+		Type     string
+		Endpoint string
+	}{
+		Type:     "otlptracegrpc",
+		Endpoint: c.endpoint,
+	}
+}
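
For orientation, here is a minimal sketch of how this vendored client is reached through the public exporter API. The sdktrace wiring and the collector address are assumptions for illustration, not part of this diff:

    package main

    import (
        "context"
        "log"

        "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
        sdktrace "go.opentelemetry.io/otel/sdk/trace"
    )

    func main() {
        ctx := context.Background()

        // New builds the client above and calls Start, which dials the
        // endpoint unless a connection was supplied via WithGRPCConn.
        exp, err := otlptracegrpc.New(ctx,
            otlptracegrpc.WithEndpoint("localhost:4317"), // assumed local collector
            otlptracegrpc.WithInsecure(),
        )
        if err != nil {
            log.Fatalf("create exporter: %v", err)
        }

        tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
        defer func() {
            // Shutdown flushes pending spans and eventually reaches client.Stop.
            if err := tp.Shutdown(ctx); err != nil {
                log.Printf("shutdown: %v", err)
            }
        }()
    }
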
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..a3c2690c5d0e903ecc0a17950d9c3341a8e8ff66
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
@@ -0,0 +1,77 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package otlptracegrpc provides an OTLP span exporter using gRPC.
+By default the telemetry is sent to https://localhost:4317.
+
+Exporter should be created using [New].
+
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") -
+target to which the exporter sends telemetry.
+The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md.
+The value must contain a host.
+The value may additionally contain a port, a scheme, and a path.
+The value accepts the "http" and "https" schemes.
+The value should not contain a query string or fragment.
+OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
+The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_TRACES_INSECURE (default: "false") -
+setting "true" disables client transport security for the exporter's gRPC connection.
+You can use this only when an endpoint is provided without the http or https scheme.
+This setting overrides the scheme defined via
+OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT.
+OTEL_EXPORTER_OTLP_TRACES_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE.
+The configuration can be overridden by [WithInsecure], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) -
+key-value pairs used as gRPC metadata associated with gRPC requests.
+The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_TRACES_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_TRACES_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION (default: none) -
+the gRPC compressor the exporter uses.
+Supported value: "gzip".
+OTEL_EXPORTER_OTLP_TRACES_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompressor], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE (default: none) -
+the filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE (default: none) -
+the filepath to the client certificate/certificate chain matching the client's private key, in PEM format, to use in mTLS communication.
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY (default: none) -
+the filepath to the client's private key, in PEM format, to use in mTLS communication.
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
+*/
+package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
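
A hedged sketch of the precedence rules documented above: the per-signal variable beats the generic one, and an explicit option beats both. The endpoint values are illustrative assumptions:

    package main

    import (
        "context"
        "os"

        "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
    )

    func main() {
        ctx := context.Background()

        // The per-signal variable wins: telemetry goes to collector-b.
        os.Setenv("OTEL_EXPORTER_OTLP_ENDPOINT", "https://collector-a:4317")
        os.Setenv("OTEL_EXPORTER_OTLP_TRACES_ENDPOINT", "https://collector-b:4317")
        expEnv, _ := otlptracegrpc.New(ctx)

        // Explicit options win over both variables: collector-c is used.
        expOpt, _ := otlptracegrpc.New(ctx,
            otlptracegrpc.WithEndpoint("collector-c:4317"))
        _, _ = expEnv, expOpt
    }
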
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
new file mode 100644
index 0000000000000000000000000000000000000000..89af41002f7a9d5722f9f3eaa2033959b5597594
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+)
+
+// New constructs a new Exporter and starts it.
+func New(ctx context.Context, opts ...Option) (*otlptrace.Exporter, error) {
+	return otlptrace.New(ctx, NewClient(opts...))
+}
+
+// NewUnstarted constructs a new Exporter and does not start it.
+func NewUnstarted(opts ...Option) *otlptrace.Exporter {
+	return otlptrace.NewUnstarted(NewClient(opts...))
+}
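
When construction and dialing need separate lifecycles, NewUnstarted defers the connection until Start is called. A short sketch, assuming the same imports as the earlier example and a ctx in scope:

    func startLater(ctx context.Context) error {
        exp := otlptracegrpc.NewUnstarted(otlptracegrpc.WithInsecure())
        // No connection exists yet; Start dials the configured endpoint.
        if err := exp.Start(ctx); err != nil {
            return err
        }
        // ... register exp with a TracerProvider, export spans ...
        return exp.Shutdown(ctx)
    }
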
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
new file mode 100644
index 0000000000000000000000000000000000000000..5530119e4cf878793486740d912d59d75d9ab984
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
@@ -0,0 +1,202 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig"
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+// ConfigFn is the generic function used to set a config.
+type ConfigFn func(*EnvOptionsReader)
+
+// EnvOptionsReader reads the required environment variables.
+type EnvOptionsReader struct {
+	GetEnv    func(string) string
+	ReadFile  func(string) ([]byte, error)
+	Namespace string
+}
+
+// Apply runs every ConfigFn.
+func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
+	for _, o := range opts {
+		o(e)
+	}
+}
+
+// GetEnvValue gets an OTLP environment variable value of the specified key
+// using the GetEnv function.
+// This function prepends the OTLP specified namespace to all key lookups.
+func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
+	v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
+	return v, v != ""
+}
+
+// WithString retrieves the specified config and passes it to ConfigFn as a string.
+func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			fn(v)
+		}
+	}
+}
+
+// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
+func WithBool(n string, fn func(bool)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			b := strings.ToLower(v) == "true"
+			fn(b)
+		}
+	}
+}
+
+// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
+func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			d, err := strconv.Atoi(v)
+			if err != nil {
+				global.Error(err, "parse duration", "input", v)
+				return
+			}
+			fn(time.Duration(d) * time.Millisecond)
+		}
+	}
+}
+
+// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
+func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			fn(stringToHeader(v))
+		}
+	}
+}
+
+// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
+func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			u, err := url.Parse(v)
+			if err != nil {
+				global.Error(err, "parse url", "input", v)
+				return
+			}
+			fn(u)
+		}
+	}
+}
+
+// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool, which is passed to fn.
+func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			b, err := e.ReadFile(v)
+			if err != nil {
+				global.Error(err, "read tls ca cert file", "file", v)
+				return
+			}
+			c, err := createCertPool(b)
+			if err != nil {
+				global.Error(err, "create tls cert pool")
+				return
+			}
+			fn(c)
+		}
+	}
+}
+
+// WithClientCert returns a ConfigFn that reads the environment variables nc and nk as filepaths to a client certificate and key pair. If they exist, they are parsed as a crypto/tls.Certificate, which is passed to fn.
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		vc, okc := e.GetEnvValue(nc)
+		vk, okk := e.GetEnvValue(nk)
+		if !okc || !okk {
+			return
+		}
+		cert, err := e.ReadFile(vc)
+		if err != nil {
+			global.Error(err, "read tls client cert", "file", vc)
+			return
+		}
+		key, err := e.ReadFile(vk)
+		if err != nil {
+			global.Error(err, "read tls client key", "file", vk)
+			return
+		}
+		crt, err := tls.X509KeyPair(cert, key)
+		if err != nil {
+			global.Error(err, "create tls client key pair")
+			return
+		}
+		fn(crt)
+	}
+}
+
+func keyWithNamespace(ns, key string) string {
+	if ns == "" {
+		return key
+	}
+	return fmt.Sprintf("%s_%s", ns, key)
+}
+
+func stringToHeader(value string) map[string]string {
+	headersPairs := strings.Split(value, ",")
+	headers := make(map[string]string)
+
+	for _, header := range headersPairs {
+		n, v, found := strings.Cut(header, "=")
+		if !found {
+			global.Error(errors.New("missing '='"), "parse headers", "input", header)
+			continue
+		}
+		name, err := url.PathUnescape(n)
+		if err != nil {
+			global.Error(err, "escape header key", "key", n)
+			continue
+		}
+		trimmedName := strings.TrimSpace(name)
+		value, err := url.PathUnescape(v)
+		if err != nil {
+			global.Error(err, "escape header value", "value", v)
+			continue
+		}
+		trimmedValue := strings.TrimSpace(value)
+
+		headers[trimmedName] = trimmedValue
+	}
+
+	return headers
+}
+
+func createCertPool(certBytes []byte) (*x509.CertPool, error) {
+	cp := x509.NewCertPool()
+	if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+		return nil, errors.New("failed to append certificate to the cert pool")
+	}
+	return cp, nil
+}
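
Everything in this reader flows through the injected GetEnv and ReadFile functions, which is what makes the package testable. A sketch with a fake environment; note envconfig is internal to the module, so user code cannot import it, and the map contents plus the time/errors imports are assumptions:

    func readFakeTimeout() time.Duration {
        fake := map[string]string{"OTEL_EXPORTER_OTLP_TIMEOUT": "5000"}
        r := envconfig.EnvOptionsReader{
            GetEnv:    func(k string) string { return fake[k] },
            ReadFile:  func(string) ([]byte, error) { return nil, errors.New("no files in this sketch") },
            Namespace: "OTEL_EXPORTER_OTLP",
        }
        var timeout time.Duration
        r.Apply(
            // GetEnvValue prepends the namespace, so this reads
            // OTEL_EXPORTER_OTLP_TIMEOUT and parses it as milliseconds.
            envconfig.WithDuration("TIMEOUT", func(d time.Duration) { timeout = d }),
        )
        return timeout // 5s for the fake value above
    }
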
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
new file mode 100644
index 0000000000000000000000000000000000000000..1fb29061894c063140e69dbcd7383f045bc17e83
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
@@ -0,0 +1,35 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry\"}" --out=otlpconfig/options.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/options_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl "--data={}" --out=otlpconfig/optiontypes.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl "--data={}" --out=otlpconfig/tls.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl "--data={}" --out=otlptracetest/client.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl "--data={}" --out=otlptracetest/collector.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl "--data={}" --out=otlptracetest/data.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl "--data={}" --out=otlptracetest/otlptest.go
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
new file mode 100644
index 0000000000000000000000000000000000000000..32f6dddb4f6f5ff1f5c94f8a6b1bbf371f6e3c90
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
@@ -0,0 +1,153 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"net/url"
+	"os"
+	"path"
+	"strings"
+	"time"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig"
+)
+
+// DefaultEnvOptionsReader is the default environment reader.
+var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
+	GetEnv:    os.Getenv,
+	ReadFile:  os.ReadFile,
+	Namespace: "OTEL_EXPORTER_OTLP",
+}
+
+// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
+func ApplyGRPCEnvConfigs(cfg Config) Config {
+	opts := getOptionsFromEnv()
+	for _, opt := range opts {
+		cfg = opt.ApplyGRPCOption(cfg)
+	}
+	return cfg
+}
+
+// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
+func ApplyHTTPEnvConfigs(cfg Config) Config {
+	opts := getOptionsFromEnv()
+	for _, opt := range opts {
+		cfg = opt.ApplyHTTPOption(cfg)
+	}
+	return cfg
+}
+
+func getOptionsFromEnv() []GenericOption {
+	opts := []GenericOption{}
+
+	tlsConf := &tls.Config{}
+	DefaultEnvOptionsReader.Apply(
+		envconfig.WithURL("ENDPOINT", func(u *url.URL) {
+			opts = append(opts, withEndpointScheme(u))
+			opts = append(opts, newSplitOption(func(cfg Config) Config {
+				cfg.Traces.Endpoint = u.Host
+				// For OTLP/HTTP endpoint URLs without a per-signal
+				// configuration, the passed endpoint is used as a base URL
+				// and the signals are sent to these paths relative to that.
+				cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath)
+				return cfg
+			}, withEndpointForGRPC(u)))
+		}),
+		envconfig.WithURL("TRACES_ENDPOINT", func(u *url.URL) {
+			opts = append(opts, withEndpointScheme(u))
+			opts = append(opts, newSplitOption(func(cfg Config) Config {
+				cfg.Traces.Endpoint = u.Host
+				// For endpoint URLs for OTLP/HTTP per-signal variables, the
+				// URL MUST be used as-is without any modification. The only
+				// exception is that if a URL contains no path part, the root
+				// path / MUST be used.
+				path := u.Path
+				if path == "" {
+					path = "/"
+				}
+				cfg.Traces.URLPath = path
+				return cfg
+			}, withEndpointForGRPC(u)))
+		}),
+		envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+		envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+		envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+		envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+		withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
+		envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+		envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+		envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+		envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+		WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+		WithEnvCompression("TRACES_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+		envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+		envconfig.WithDuration("TRACES_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+	)
+
+	return opts
+}
+
+func withEndpointScheme(u *url.URL) GenericOption {
+	switch strings.ToLower(u.Scheme) {
+	case "http", "unix":
+		return WithInsecure()
+	default:
+		return WithSecure()
+	}
+}
+
+func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
+	return func(cfg Config) Config {
+		// For OTLP/gRPC endpoints, this is the target to which the
+		// exporter is going to send telemetry.
+		cfg.Traces.Endpoint = path.Join(u.Host, u.Path)
+		return cfg
+	}
+}
+
+// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
+func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
+	return func(e *envconfig.EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			cp := NoCompression
+			if v == "gzip" {
+				cp = GzipCompression
+			}
+
+			fn(cp)
+		}
+	}
+}
+
+// revive:disable-next-line:flag-parameter
+func withInsecure(b bool) GenericOption {
+	if b {
+		return WithInsecure()
+	}
+	return WithSecure()
+}
+
+func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
+	return func(e *envconfig.EnvOptionsReader) {
+		if c.RootCAs != nil || len(c.Certificates) > 0 {
+			fn(c)
+		}
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
new file mode 100644
index 0000000000000000000000000000000000000000..f0203cbe723f99734f5dd25eed52bf173fac6ee0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
@@ -0,0 +1,345 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+
+import (
+	"crypto/tls"
+	"fmt"
+	"net/url"
+	"path"
+	"strings"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/backoff"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/credentials/insecure"
+	"google.golang.org/grpc/encoding/gzip"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+const (
+	// DefaultTracesPath is a default URL path for the endpoint that
+	// receives spans.
+	DefaultTracesPath string = "/v1/traces"
+	// DefaultTimeout is a default max waiting time for the backend to process
+	// each span batch.
+	DefaultTimeout time.Duration = 10 * time.Second
+)
+
+type (
+	SignalConfig struct {
+		Endpoint    string
+		Insecure    bool
+		TLSCfg      *tls.Config
+		Headers     map[string]string
+		Compression Compression
+		Timeout     time.Duration
+		URLPath     string
+
+		// gRPC configurations
+		GRPCCredentials credentials.TransportCredentials
+	}
+
+	Config struct {
+		// Signal specific configurations
+		Traces SignalConfig
+
+		RetryConfig retry.Config
+
+		// gRPC configurations
+		ReconnectionPeriod time.Duration
+		ServiceConfig      string
+		DialOptions        []grpc.DialOption
+		GRPCConn           *grpc.ClientConn
+	}
+)
+
+// NewHTTPConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default HTTP config values.
+func NewHTTPConfig(opts ...HTTPOption) Config {
+	cfg := Config{
+		Traces: SignalConfig{
+			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
+			URLPath:     DefaultTracesPath,
+			Compression: NoCompression,
+			Timeout:     DefaultTimeout,
+		},
+		RetryConfig: retry.DefaultConfig,
+	}
+	cfg = ApplyHTTPEnvConfigs(cfg)
+	for _, opt := range opts {
+		cfg = opt.ApplyHTTPOption(cfg)
+	}
+	cfg.Traces.URLPath = cleanPath(cfg.Traces.URLPath, DefaultTracesPath)
+	return cfg
+}
+
+// cleanPath returns a path with all spaces trimmed and all redundancies
+// removed. If urlPath is empty or cleaning it results in an empty string,
+// defaultPath is returned instead.
+func cleanPath(urlPath string, defaultPath string) string {
+	tmp := path.Clean(strings.TrimSpace(urlPath))
+	if tmp == "." {
+		return defaultPath
+	}
+	if !path.IsAbs(tmp) {
+		tmp = fmt.Sprintf("/%s", tmp)
+	}
+	return tmp
+}
+
+// NewGRPCConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default gRPC config values.
+func NewGRPCConfig(opts ...GRPCOption) Config {
+	userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version()
+	cfg := Config{
+		Traces: SignalConfig{
+			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
+			URLPath:     DefaultTracesPath,
+			Compression: NoCompression,
+			Timeout:     DefaultTimeout,
+		},
+		RetryConfig: retry.DefaultConfig,
+		DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)},
+	}
+	cfg = ApplyGRPCEnvConfigs(cfg)
+	for _, opt := range opts {
+		cfg = opt.ApplyGRPCOption(cfg)
+	}
+
+	if cfg.ServiceConfig != "" {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
+	}
+	// Prioritize GRPCCredentials over Insecure (passing both is an error).
+	if cfg.Traces.GRPCCredentials != nil {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials))
+	} else if cfg.Traces.Insecure {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	} else {
+		// Default to using the host's root CA.
+		creds := credentials.NewTLS(nil)
+		cfg.Traces.GRPCCredentials = creds
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
+	}
+	if cfg.Traces.Compression == GzipCompression {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
+	}
+	if cfg.ReconnectionPeriod != 0 {
+		p := grpc.ConnectParams{
+			Backoff:           backoff.DefaultConfig,
+			MinConnectTimeout: cfg.ReconnectionPeriod,
+		}
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
+	}
+
+	return cfg
+}
+
+type (
+	// GenericOption applies an option to the HTTP or gRPC driver.
+	GenericOption interface {
+		ApplyHTTPOption(Config) Config
+		ApplyGRPCOption(Config) Config
+
+		// A private method to prevent users from implementing the
+		// interface, so future additions to it will not
+		// violate compatibility.
+		private()
+	}
+
+	// HTTPOption applies an option to the HTTP driver.
+	HTTPOption interface {
+		ApplyHTTPOption(Config) Config
+
+		// A private method to prevent users from implementing the
+		// interface, so future additions to it will not
+		// violate compatibility.
+		private()
+	}
+
+	// GRPCOption applies an option to the gRPC driver.
+	GRPCOption interface {
+		ApplyGRPCOption(Config) Config
+
+		// A private method to prevent users from implementing the
+		// interface, so future additions to it will not
+		// violate compatibility.
+		private()
+	}
+)
+
+// genericOption is an option that applies the same logic
+// for both gRPC and HTTP.
+type genericOption struct {
+	fn func(Config) Config
+}
+
+func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
+	return g.fn(cfg)
+}
+
+func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
+	return g.fn(cfg)
+}
+
+func (genericOption) private() {}
+
+func newGenericOption(fn func(cfg Config) Config) GenericOption {
+	return &genericOption{fn: fn}
+}
+
+// splitOption is an option that applies different logic
+// for gRPC and HTTP.
+type splitOption struct {
+	httpFn func(Config) Config
+	grpcFn func(Config) Config
+}
+
+func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
+	return g.grpcFn(cfg)
+}
+
+func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
+	return g.httpFn(cfg)
+}
+
+func (splitOption) private() {}
+
+func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
+	return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
+}
+
+// httpOption is an option that is only applied to the HTTP driver.
+type httpOption struct {
+	fn func(Config) Config
+}
+
+func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
+	return h.fn(cfg)
+}
+
+func (httpOption) private() {}
+
+func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
+	return &httpOption{fn: fn}
+}
+
+// grpcOption is an option that is only applied to the gRPC driver.
+type grpcOption struct {
+	fn func(Config) Config
+}
+
+func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
+	return h.fn(cfg)
+}
+
+func (grpcOption) private() {}
+
+func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
+	return &grpcOption{fn: fn}
+}
+
+// Generic Options
+
+func WithEndpoint(endpoint string) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Traces.Endpoint = endpoint
+		return cfg
+	})
+}
+
+func WithEndpointURL(v string) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		u, err := url.Parse(v)
+		if err != nil {
+			global.Error(err, "otlptrace: parse endpoint url", "url", v)
+			return cfg
+		}
+
+		cfg.Traces.Endpoint = u.Host
+		cfg.Traces.URLPath = u.Path
+		if u.Scheme != "https" {
+			cfg.Traces.Insecure = true
+		}
+
+		return cfg
+	})
+}
+
+func WithCompression(compression Compression) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Traces.Compression = compression
+		return cfg
+	})
+}
+
+func WithURLPath(urlPath string) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Traces.URLPath = urlPath
+		return cfg
+	})
+}
+
+func WithRetry(rc retry.Config) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.RetryConfig = rc
+		return cfg
+	})
+}
+
+func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
+	return newSplitOption(func(cfg Config) Config {
+		cfg.Traces.TLSCfg = tlsCfg.Clone()
+		return cfg
+	}, func(cfg Config) Config {
+		cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg)
+		return cfg
+	})
+}
+
+func WithInsecure() GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Traces.Insecure = true
+		return cfg
+	})
+}
+
+func WithSecure() GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Traces.Insecure = false
+		return cfg
+	})
+}
+
+func WithHeaders(headers map[string]string) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Traces.Headers = headers
+		return cfg
+	})
+}
+
+func WithTimeout(duration time.Duration) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Traces.Timeout = duration
+		return cfg
+	})
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
new file mode 100644
index 0000000000000000000000000000000000000000..d9dcdc96e7da948ade4e76b7ca0b4a467af12216
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
@@ -0,0 +1,51 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+
+const (
+	// DefaultCollectorGRPCPort is the default gRPC port of the collector.
+	DefaultCollectorGRPCPort uint16 = 4317
+	// DefaultCollectorHTTPPort is the default HTTP port of the collector.
+	DefaultCollectorHTTPPort uint16 = 4318
+	// DefaultCollectorHost is the host address the Exporter will attempt to
+	// connect to if no collector address is provided.
+	DefaultCollectorHost string = "localhost"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression int
+
+const (
+	// NoCompression tells the driver to send payloads without
+	// compression.
+	NoCompression Compression = iota
+	// GzipCompression tells the driver to send payloads after
+	// compressing them with gzip.
+	GzipCompression
+)
+
+// Marshaler describes the kind of message format sent to the collector.
+type Marshaler int
+
+const (
+	// MarshalProto tells the driver to send using the protobuf binary format.
+	MarshalProto Marshaler = iota
+	// MarshalJSON tells the driver to send using json format.
+	MarshalJSON
+)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
new file mode 100644
index 0000000000000000000000000000000000000000..19b6d4b21f990dffc9cfb2c7036a4a7bb0affe6c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
@@ -0,0 +1,37 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+)
+
+// CreateTLSConfig creates a tls.Config from raw certificate bytes
+// to verify a server certificate.
+func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
+	cp := x509.NewCertPool()
+	if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+		return nil, errors.New("failed to append certificate to the cert pool")
+	}
+
+	return &tls.Config{
+		RootCAs: cp,
+	}, nil
+}
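
CreateTLSConfig's data flow in one sketch: PEM bytes in, a *tls.Config with a populated RootCAs pool out. The CA path is an assumption, and since the package is internal this only illustrates the intended flow:

    func loadCollectorCA() (*tls.Config, error) {
        pem, err := os.ReadFile("/etc/ssl/certs/collector-ca.pem") // assumed path
        if err != nil {
            return nil, err
        }
        // Returns an error if the bytes contain no parsable PEM certificate.
        return otlpconfig.CreateTLSConfig(pem)
    }
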
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
new file mode 100644
index 0000000000000000000000000000000000000000..076905e54bfc4b25d37d68b2d92757ec82c36ba9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
@@ -0,0 +1,67 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/partialsuccess.go
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
+
+import "fmt"
+
+// PartialSuccess represents the underlying error for all handling of
+// OTLP partial success messages. Use `errors.Is(err,
+// PartialSuccess{})` to test whether an error passed to the OTel
+// error handler belongs to this category.
+type PartialSuccess struct {
+	ErrorMessage  string
+	RejectedItems int64
+	RejectedKind  string
+}
+
+var _ error = PartialSuccess{}
+
+// Error implements the error interface.
+func (ps PartialSuccess) Error() string {
+	msg := ps.ErrorMessage
+	if msg == "" {
+		msg = "empty message"
+	}
+	return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
+}
+
+// Is supports the errors.Is() interface.
+func (ps PartialSuccess) Is(err error) bool {
+	_, ok := err.(PartialSuccess)
+	return ok
+}
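+
+// As an illustrative sketch, an OTel error handler could detect this error
+// category like so:
+//
+//	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
+//		if errors.Is(err, PartialSuccess{}) {
+//			// Some, but not all, exported items were rejected.
+//		}
+//	}))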
+
+// TracePartialSuccessError returns an error describing a partial success
+// response for the trace signal.
+func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
+	return PartialSuccess{
+		ErrorMessage:  errorMessage,
+		RejectedItems: itemsRejected,
+		RejectedKind:  "spans",
+	}
+}
+
+// MetricPartialSuccessError returns an error describing a partial success
+// response for the metric signal.
+func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
+	return PartialSuccess{
+		ErrorMessage:  errorMessage,
+		RejectedItems: itemsRejected,
+		RejectedKind:  "metric data points",
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
new file mode 100644
index 0000000000000000000000000000000000000000..3ce7d6632b8694d7ac081454112d120ceac34be9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
@@ -0,0 +1,167 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/retry/retry.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package retry provides request retry functionality that can perform
+// configurable exponential backoff for transient errors and honor any
+// explicit throttle responses received.
+package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/cenkalti/backoff/v4"
+)
+
+// DefaultConfig is the recommended default configuration to use.
+var DefaultConfig = Config{
+	Enabled:         true,
+	InitialInterval: 5 * time.Second,
+	MaxInterval:     30 * time.Second,
+	MaxElapsedTime:  time.Minute,
+}
+
+// Config defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type Config struct {
+	// Enabled indicates whether to retry sending batches in case of
+	// export failure.
+	Enabled bool
+	// InitialInterval is the time to wait after the first failure before
+	// retrying.
+	InitialInterval time.Duration
+	// MaxInterval is the upper bound on backoff interval. Once this value is
+	// reached the delay between consecutive retries will always be
+	// `MaxInterval`.
+	MaxInterval time.Duration
+	// MaxElapsedTime is the maximum amount of time (including retries) spent
+	// trying to send a request/batch.  Once this value is reached, the data
+	// is discarded.
+	MaxElapsedTime time.Duration
+}
+
+// RequestFunc wraps a request with retry logic.
+type RequestFunc func(context.Context, func(context.Context) error) error
+
+// EvaluateFunc returns whether an error is retryable and whether an explicit
+// throttle duration included in the error should be honored.
+//
+// The function must return true if the error argument is retry-able,
+// otherwise it must return false for the first return parameter.
+//
+// The function must return a non-zero time.Duration if the error contains
+// explicit throttle duration that should be honored, otherwise it must return
+// a zero valued time.Duration.
+type EvaluateFunc func(error) (bool, time.Duration)
+
+// RequestFunc returns a RequestFunc using the evaluate function to determine
+// if requests can be retried, based on the exponential backoff
+// configuration of c.
+func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
+	if !c.Enabled {
+		return func(ctx context.Context, fn func(context.Context) error) error {
+			return fn(ctx)
+		}
+	}
+
+	return func(ctx context.Context, fn func(context.Context) error) error {
+		// Do not use NewExponentialBackOff since it calls Reset and the code here
+		// must call Reset after changing the InitialInterval (this saves an
+		// unnecessary call to Now).
+		b := &backoff.ExponentialBackOff{
+			InitialInterval:     c.InitialInterval,
+			RandomizationFactor: backoff.DefaultRandomizationFactor,
+			Multiplier:          backoff.DefaultMultiplier,
+			MaxInterval:         c.MaxInterval,
+			MaxElapsedTime:      c.MaxElapsedTime,
+			Stop:                backoff.Stop,
+			Clock:               backoff.SystemClock,
+		}
+		b.Reset()
+
+		for {
+			err := fn(ctx)
+			if err == nil {
+				return nil
+			}
+
+			retryable, throttle := evaluate(err)
+			if !retryable {
+				return err
+			}
+
+			bOff := b.NextBackOff()
+			if bOff == backoff.Stop {
+				return fmt.Errorf("max retry time elapsed: %w", err)
+			}
+
+			// Wait for the greater of the backoff or throttle delay.
+			var delay time.Duration
+			if bOff > throttle {
+				delay = bOff
+			} else {
+				elapsed := b.GetElapsedTime()
+				if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
+					return fmt.Errorf("max retry time would elapse: %w", err)
+				}
+				delay = throttle
+			}
+
+			if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
+				return fmt.Errorf("%w: %s", ctxErr, err)
+			}
+		}
+	}
+}
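+
+// As an illustrative sketch, a caller could wrap an export call with the
+// default retry behavior like so (doExport is hypothetical):
+//
+//	send := DefaultConfig.RequestFunc(func(err error) (bool, time.Duration) {
+//		// Treat every error as retryable with no explicit throttle hint.
+//		return true, 0
+//	})
+//	err := send(ctx, func(ctx context.Context) error {
+//		return doExport(ctx)
+//	})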
+
+// Allow override for testing.
+var waitFunc = wait
+
+// wait takes the caller's context and the amount of time to wait.  It will
+// return nil if the timer fires before or at the same time as the context's
+// deadline.  This indicates that the call can be retried.
+func wait(ctx context.Context, delay time.Duration) error {
+	timer := time.NewTimer(delay)
+	defer timer.Stop()
+
+	select {
+	case <-ctx.Done():
+		// Handle the case where the timer and context deadline end
+		// simultaneously by prioritizing the timer expiration nil value
+		// response.
+		select {
+		case <-timer.C:
+		default:
+			return ctx.Err()
+		}
+	case <-timer.C:
+	}
+
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
new file mode 100644
index 0000000000000000000000000000000000000000..0fddac75895c1d5db34327128bb8915a35e49bac
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
@@ -0,0 +1,227 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+
+import (
+	"fmt"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
+)
+
+// Option applies an option to the gRPC driver.
+type Option interface {
+	applyGRPCOption(otlpconfig.Config) otlpconfig.Config
+}
+
+func asGRPCOptions(opts []Option) []otlpconfig.GRPCOption {
+	converted := make([]otlpconfig.GRPCOption, len(opts))
+	for i, o := range opts {
+		converted[i] = otlpconfig.NewGRPCOption(o.applyGRPCOption)
+	}
+	return converted
+}
+
+// RetryConfig defines configuration for retrying export of span batches that
+// failed to be received by the target endpoint.
+//
+// This configuration does not define any network retry strategy. That is
+// entirely handled by the gRPC ClientConn.
+type RetryConfig retry.Config
+
+type wrappedOption struct {
+	otlpconfig.GRPCOption
+}
+
+func (w wrappedOption) applyGRPCOption(cfg otlpconfig.Config) otlpconfig.Config {
+	return w.ApplyGRPCOption(cfg)
+}
+
+// WithInsecure disables client transport security for the exporter's gRPC
+// connection just like grpc.WithInsecure()
+// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. Note, by
+// default, client security is required unless WithInsecure is used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithInsecure() Option {
+	return wrappedOption{otlpconfig.WithInsecure()}
+}
+
+// WithEndpoint sets the target endpoint the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// will take precedence.
+//
+// If both this option and WithEndpointURL are used, the last used option will
+// take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4317" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpoint(endpoint string) Option {
+	return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
+}
+
+// WithEndpointURL sets the target endpoint URL the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// will take precedence.
+//
+// If both this option and WithEndpoint are used, the last used option will
+// take precedence.
+//
+// If an invalid URL is provided, the default value will be kept.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4317" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpointURL(u string) Option {
+	return wrappedOption{otlpconfig.WithEndpointURL(u)}
+}
+
+// WithReconnectionPeriod sets the minimum amount of time between connection
+// attempts to the target endpoint.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithReconnectionPeriod(rp time.Duration) Option {
+	return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+		cfg.ReconnectionPeriod = rp
+		return cfg
+	})}
+}
+
+func compressorToCompression(compressor string) otlpconfig.Compression {
+	if compressor == "gzip" {
+		return otlpconfig.GzipCompression
+	}
+
+	otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
+	return otlpconfig.NoCompression
+}
+
+// WithCompressor sets the compressor for the gRPC client to use when sending
+// requests. Supported compressor values: "gzip".
+func WithCompressor(compressor string) Option {
+	return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))}
+}
+
+// WithHeaders will send the provided headers with each gRPC request.
+func WithHeaders(headers map[string]string) Option {
+	return wrappedOption{otlpconfig.WithHeaders(headers)}
+}
+
+// WithTLSCredentials allows the connection to use TLS credentials when
+// talking to the server. It takes in grpc.TransportCredentials instead of,
+// say, a certificate file or a tls.Certificate, because these credentials
+// can be retrieved in many ways, e.g. a plain file, an in-code tls.Config,
+// or certificate rotation, so it is up to the caller to decide what to use.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithTLSCredentials(creds credentials.TransportCredentials) Option {
+	return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+		cfg.Traces.GRPCCredentials = creds
+		return cfg
+	})}
+}
+
+// WithServiceConfig defines the default gRPC service config used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithServiceConfig(serviceConfig string) Option {
+	return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+		cfg.ServiceConfig = serviceConfig
+		return cfg
+	})}
+}
+
+// WithDialOption sets explicit grpc.DialOptions to use when making a
+// connection. The options here are appended to the internal grpc.DialOptions
+// used, so they will take precedence over any other internal grpc.DialOptions
+// they might conflict with.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithDialOption(opts ...grpc.DialOption) Option {
+	return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+		cfg.DialOptions = opts
+		return cfg
+	})}
+}
+
+// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
+//
+// This option takes precedence over any other option that relates to
+// establishing or persisting a gRPC connection to a target endpoint. Any
+// other option of those types passed will be ignored.
+//
+// It is the caller's responsibility to close the passed conn. The client
+// Shutdown method will not close this connection.
+func WithGRPCConn(conn *grpc.ClientConn) Option {
+	return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+		cfg.GRPCConn = conn
+		return cfg
+	})}
+}
+
+// WithTimeout sets the max amount of time a client will attempt to export a
+// batch of spans. This takes precedence over any retry settings defined with
+// WithRetry; once this time limit has been reached, the export is abandoned
+// and the batch of spans is dropped.
+//
+// If unset, the default timeout will be set to 10 seconds.
+func WithTimeout(duration time.Duration) Option {
+	return wrappedOption{otlpconfig.WithTimeout(duration)}
+}
+
+// WithRetry sets the retry policy for transient retryable errors that may be
+// returned by the target endpoint when exporting a batch of spans.
+//
+// If the target endpoint responds with not only a retryable error, but also
+// explicitly returns a backoff time in the response, that time will take
+// precedence over these settings.
+//
+// These settings do not define any network retry strategy. That is entirely
+// handled by the gRPC ClientConn.
+//
+// If unset, the default retry policy will be used. It will retry the export
+// 5 seconds after receiving a retryable error and increase exponentially
+// after each error for no more than a total time of 1 minute.
+func WithRetry(settings RetryConfig) Option {
+	return wrappedOption{otlpconfig.WithRetry(retry.Config(settings))}
+}
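+
+// As an illustrative sketch, these options are typically combined when
+// constructing a client (the endpoint value is hypothetical):
+//
+//	client := NewClient(
+//		WithEndpoint("collector.example.com:4317"),
+//		WithInsecure(),
+//		WithRetry(RetryConfig{
+//			Enabled:         true,
+//			InitialInterval: 5 * time.Second,
+//			MaxInterval:     30 * time.Second,
+//			MaxElapsedTime:  time.Minute,
+//		}),
+//	)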
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go
new file mode 100644
index 0000000000000000000000000000000000000000..3b5f3839f2795397fd117290850919df50bf703b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go
@@ -0,0 +1,352 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
+
+import (
+	"bytes"
+	"compress/gzip"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"strconv"
+	"sync"
+	"time"
+
+	"google.golang.org/protobuf/proto"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry"
+	coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
+	tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
+)
+
+const contentTypeProto = "application/x-protobuf"
+
+var gzPool = sync.Pool{
+	New: func() interface{} {
+		w := gzip.NewWriter(io.Discard)
+		return w
+	},
+}
+
+// Keep it in sync with golang's DefaultTransport from net/http! We
+// have our own copy to avoid handling a situation where the
+// DefaultTransport is overwritten with some different implementation
+// of http.RoundTripper or is modified by another package.
+var ourTransport = &http.Transport{
+	Proxy: http.ProxyFromEnvironment,
+	DialContext: (&net.Dialer{
+		Timeout:   30 * time.Second,
+		KeepAlive: 30 * time.Second,
+	}).DialContext,
+	ForceAttemptHTTP2:     true,
+	MaxIdleConns:          100,
+	IdleConnTimeout:       90 * time.Second,
+	TLSHandshakeTimeout:   10 * time.Second,
+	ExpectContinueTimeout: 1 * time.Second,
+}
+
+type client struct {
+	name        string
+	cfg         otlpconfig.SignalConfig
+	generalCfg  otlpconfig.Config
+	requestFunc retry.RequestFunc
+	client      *http.Client
+	stopCh      chan struct{}
+	stopOnce    sync.Once
+}
+
+var _ otlptrace.Client = (*client)(nil)
+
+// NewClient creates a new HTTP trace client.
+func NewClient(opts ...Option) otlptrace.Client {
+	cfg := otlpconfig.NewHTTPConfig(asHTTPOptions(opts)...)
+
+	httpClient := &http.Client{
+		Transport: ourTransport,
+		Timeout:   cfg.Traces.Timeout,
+	}
+	if cfg.Traces.TLSCfg != nil {
+		transport := ourTransport.Clone()
+		transport.TLSClientConfig = cfg.Traces.TLSCfg
+		httpClient.Transport = transport
+	}
+
+	stopCh := make(chan struct{})
+	return &client{
+		name:        "traces",
+		cfg:         cfg.Traces,
+		generalCfg:  cfg,
+		requestFunc: cfg.RetryConfig.RequestFunc(evaluate),
+		stopCh:      stopCh,
+		client:      httpClient,
+	}
+}
+
+// Start does nothing in an HTTP client.
+func (d *client) Start(ctx context.Context) error {
+	// nothing to do
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+	return nil
+}
+
+// Stop shuts down the client and interrupts any in-flight requests.
+func (d *client) Stop(ctx context.Context) error {
+	d.stopOnce.Do(func() {
+		close(d.stopCh)
+	})
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+	return nil
+}
+
+// UploadTraces sends a batch of spans to the collector.
+func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
+	pbRequest := &coltracepb.ExportTraceServiceRequest{
+		ResourceSpans: protoSpans,
+	}
+	rawRequest, err := proto.Marshal(pbRequest)
+	if err != nil {
+		return err
+	}
+
+	ctx, cancel := d.contextWithStop(ctx)
+	defer cancel()
+
+	request, err := d.newRequest(rawRequest)
+	if err != nil {
+		return err
+	}
+
+	return d.requestFunc(ctx, func(ctx context.Context) error {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		request.reset(ctx)
+		resp, err := d.client.Do(request.Request)
+		var urlErr *url.Error
+		if errors.As(err, &urlErr) && urlErr.Temporary() {
+			return newResponseError(http.Header{})
+		}
+		if err != nil {
+			return err
+		}
+
+		if resp != nil && resp.Body != nil {
+			defer func() {
+				if err := resp.Body.Close(); err != nil {
+					otel.Handle(err)
+				}
+			}()
+		}
+
+		switch sc := resp.StatusCode; {
+		case sc >= 200 && sc <= 299:
+			// Success, do not retry.
+			// Read the partial success message, if any.
+			var respData bytes.Buffer
+			if _, err := io.Copy(&respData, resp.Body); err != nil {
+				return err
+			}
+			if respData.Len() == 0 {
+				return nil
+			}
+
+			if resp.Header.Get("Content-Type") == "application/x-protobuf" {
+				var respProto coltracepb.ExportTraceServiceResponse
+				if err := proto.Unmarshal(respData.Bytes(), &respProto); err != nil {
+					return err
+				}
+
+				if respProto.PartialSuccess != nil {
+					msg := respProto.PartialSuccess.GetErrorMessage()
+					n := respProto.PartialSuccess.GetRejectedSpans()
+					if n != 0 || msg != "" {
+						err := internal.TracePartialSuccessError(n, msg)
+						otel.Handle(err)
+					}
+				}
+			}
+			return nil
+
+		case sc == http.StatusTooManyRequests,
+			sc == http.StatusBadGateway,
+			sc == http.StatusServiceUnavailable,
+			sc == http.StatusGatewayTimeout:
+			// Retry-able failures.  Drain the body to reuse the connection.
+			if _, err := io.Copy(io.Discard, resp.Body); err != nil {
+				otel.Handle(err)
+			}
+			return newResponseError(resp.Header)
+		default:
+			return fmt.Errorf("failed to send to %s: %s", request.URL, resp.Status)
+		}
+	})
+}
+
+func (d *client) newRequest(body []byte) (request, error) {
+	u := url.URL{Scheme: d.getScheme(), Host: d.cfg.Endpoint, Path: d.cfg.URLPath}
+	r, err := http.NewRequest(http.MethodPost, u.String(), nil)
+	if err != nil {
+		return request{Request: r}, err
+	}
+
+	userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version()
+	r.Header.Set("User-Agent", userAgent)
+
+	for k, v := range d.cfg.Headers {
+		r.Header.Set(k, v)
+	}
+	r.Header.Set("Content-Type", contentTypeProto)
+
+	req := request{Request: r}
+	switch Compression(d.cfg.Compression) {
+	case NoCompression:
+		r.ContentLength = (int64)(len(body))
+		req.bodyReader = bodyReader(body)
+	case GzipCompression:
+		// Ensure the content length is not used.
+		r.ContentLength = -1
+		r.Header.Set("Content-Encoding", "gzip")
+
+		gz := gzPool.Get().(*gzip.Writer)
+		defer gzPool.Put(gz)
+
+		var b bytes.Buffer
+		gz.Reset(&b)
+
+		if _, err := gz.Write(body); err != nil {
+			return req, err
+		}
+		// Close needs to be called to ensure the body is fully written.
+		if err := gz.Close(); err != nil {
+			return req, err
+		}
+
+		req.bodyReader = bodyReader(b.Bytes())
+	}
+
+	return req, nil
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Client.
+func (d *client) MarshalLog() interface{} {
+	return struct {
+		Type     string
+		Endpoint string
+		Insecure bool
+	}{
+		Type:     "otlphttphttp",
+		Endpoint: d.cfg.Endpoint,
+		Insecure: d.cfg.Insecure,
+	}
+}
+
+// bodyReader returns a closure returning a new reader for buf.
+func bodyReader(buf []byte) func() io.ReadCloser {
+	return func() io.ReadCloser {
+		return io.NopCloser(bytes.NewReader(buf))
+	}
+}
+
+// request wraps an http.Request with a resettable body reader.
+type request struct {
+	*http.Request
+
+	// bodyReader allows the same body to be used for multiple requests.
+	bodyReader func() io.ReadCloser
+}
+
+// reset reinitializes the request Body and uses ctx for the request.
+func (r *request) reset(ctx context.Context) {
+	r.Body = r.bodyReader()
+	r.Request = r.Request.WithContext(ctx)
+}
+
+// retryableError represents a request failure that can be retried.
+type retryableError struct {
+	throttle int64
+}
+
+// newResponseError returns a retryableError and will extract any explicit
+// throttle delay contained in headers.
+func newResponseError(header http.Header) error {
+	var rErr retryableError
+	if s, ok := header["Retry-After"]; ok {
+		if t, err := strconv.ParseInt(s[0], 10, 64); err == nil {
+			rErr.throttle = t
+		}
+	}
+	return rErr
+}
+
+func (e retryableError) Error() string {
+	return "retry-able request failure"
+}
+
+// evaluate returns whether err is retryable. If it is and it includes an explicit
+// throttling delay, that delay is also returned.
+func evaluate(err error) (bool, time.Duration) {
+	if err == nil {
+		return false, 0
+	}
+
+	rErr, ok := err.(retryableError)
+	if !ok {
+		return false, 0
+	}
+
+	return true, time.Duration(rErr.throttle) * time.Second // Retry-After is in seconds.
+}
+
+func (d *client) getScheme() string {
+	if d.cfg.Insecure {
+		return "http"
+	}
+	return "https"
+}
+
+func (d *client) contextWithStop(ctx context.Context) (context.Context, context.CancelFunc) {
+	// Unify the parent context Done signal with the client's stop
+	// channel.
+	ctx, cancel := context.WithCancel(ctx)
+	go func(ctx context.Context, cancel context.CancelFunc) {
+		select {
+		case <-ctx.Done():
+			// Nothing to do, either cancelled or deadline
+			// happened.
+		case <-d.stopCh:
+			cancel()
+		}
+	}(ctx, cancel)
+	return ctx, cancel
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..cb4f19ad16bea0bf497d6c02a6b6fff4cae57d26
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go
@@ -0,0 +1,80 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package otlptracehttp provides an OTLP span exporter using HTTP with protobuf payloads.
+By default the telemetry is sent to https://localhost:4318/v1/traces.
+
+Exporter should be created using [New].
+
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT (default: "https://localhost:4318") -
+target base URL ("/v1/traces" is appended) to which the exporter sends telemetry.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port and a path.
+The value should not contain a query string or fragment.
+The configuration can be overridden by OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+environment variable and by [WithEndpoint], [WithEndpointURL], [WithInsecure] options.
+
+OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4318/v1/traces") -
+target URL to which the exporter sends telemetry.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port and a path.
+The value should not contain a query string or fragment.
+The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithURLPath] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) -
+key-value pairs used as headers associated with HTTP requests.
+The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_TRACES_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_TRACES_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION (default: none) -
+the compression strategy the exporter uses to compress the HTTP body.
+Supported value: "gzip".
+OTEL_EXPORTER_OTLP_TRACES_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompression] option.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE (default: none) -
+the filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE (default: none) -
+the filepath to the client certificate/chain trust for the client's private key to use in mTLS communication, in PEM format.
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY (default: none) -
+the filepath to the client's private key to use in mTLS communication, in PEM format.
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
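+As an illustrative sketch, a deployment might set (values are hypothetical):
+
+	OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=https://collector.example.com:4318/v1/traces
+	OTEL_EXPORTER_OTLP_TRACES_HEADERS=api-key=abc123
+	OTEL_EXPORTER_OTLP_TRACES_COMPRESSION=gzip
+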
+[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
+*/
+package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/exporter.go
new file mode 100644
index 0000000000000000000000000000000000000000..23b8642040d3231f4347b478cf7166c77c065966
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/exporter.go
@@ -0,0 +1,41 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+)
+
+// New constructs a new Exporter and starts it.
+func New(ctx context.Context, opts ...Option) (*otlptrace.Exporter, error) {
+	return otlptrace.New(ctx, NewClient(opts...))
+}
+
+// NewUnstarted constructs a new Exporter and does not start it.
+func NewUnstarted(opts ...Option) *otlptrace.Exporter {
+	return otlptrace.NewUnstarted(NewClient(opts...))
+}
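+
+// As an illustrative sketch, the exporter is commonly wired into an SDK
+// tracer provider (sdktrace is go.opentelemetry.io/otel/sdk/trace; the
+// endpoint value is hypothetical):
+//
+//	exp, err := New(ctx, WithEndpoint("collector.example.com:4318"))
+//	if err != nil {
+//		// handle error
+//	}
+//	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))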
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go
new file mode 100644
index 0000000000000000000000000000000000000000..8016b7a0b88103d8088b04e3ae6d7ab961082df9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go
@@ -0,0 +1,210 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig"
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+// ConfigFn is the generic function used to set a config.
+type ConfigFn func(*EnvOptionsReader)
+
+// EnvOptionsReader reads the required environment variables.
+type EnvOptionsReader struct {
+	GetEnv    func(string) string
+	ReadFile  func(string) ([]byte, error)
+	Namespace string
+}
+
+// Apply runs every ConfigFn.
+func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
+	for _, o := range opts {
+		o(e)
+	}
+}
+
+// GetEnvValue gets an OTLP environment variable value of the specified key
+// using the GetEnv function.
+// This function prepends the OTLP specified namespace to all key lookups.
+func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
+	v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
+	return v, v != ""
+}
+
+// WithString retrieves the specified config and passes it to ConfigFn as a string.
+func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			fn(v)
+		}
+	}
+}
+
+// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
+func WithBool(n string, fn func(bool)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			b := strings.ToLower(v) == "true"
+			fn(b)
+		}
+	}
+}
+
+// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
+func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			d, err := strconv.Atoi(v)
+			if err != nil {
+				global.Error(err, "parse duration", "input", v)
+				return
+			}
+			fn(time.Duration(d) * time.Millisecond)
+		}
+	}
+}
+
+// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
+func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			fn(stringToHeader(v))
+		}
+	}
+}
+
+// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
+func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			u, err := url.Parse(v)
+			if err != nil {
+				global.Error(err, "parse url", "input", v)
+				return
+			}
+			fn(u)
+		}
+	}
+}
+
+// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and passed to fn.
+func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			b, err := e.ReadFile(v)
+			if err != nil {
+				global.Error(err, "read tls ca cert file", "file", v)
+				return
+			}
+			c, err := createCertPool(b)
+			if err != nil {
+				global.Error(err, "create tls cert pool")
+				return
+			}
+			fn(c)
+		}
+	}
+}
+
+// WithClientCert returns a ConfigFn that reads the environment variables nc and nk as filepaths to a client certificate and key pair. If they exist, they are parsed as a crypto/tls.Certificate and the result is passed to fn.
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		vc, okc := e.GetEnvValue(nc)
+		vk, okk := e.GetEnvValue(nk)
+		if !okc || !okk {
+			return
+		}
+		cert, err := e.ReadFile(vc)
+		if err != nil {
+			global.Error(err, "read tls client cert", "file", vc)
+			return
+		}
+		key, err := e.ReadFile(vk)
+		if err != nil {
+			global.Error(err, "read tls client key", "file", vk)
+			return
+		}
+		crt, err := tls.X509KeyPair(cert, key)
+		if err != nil {
+			global.Error(err, "create tls client key pair")
+			return
+		}
+		fn(crt)
+	}
+}
+
+func keyWithNamespace(ns, key string) string {
+	if ns == "" {
+		return key
+	}
+	return fmt.Sprintf("%s_%s", ns, key)
+}
+
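+// stringToHeader parses a comma-separated list of key=value pairs. As an
+// illustrative sketch, it turns
+//
+//	"api-key=abc123,tenant=observability"
+//
+// into
+//
+//	map[string]string{"api-key": "abc123", "tenant": "observability"}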
+func stringToHeader(value string) map[string]string {
+	headersPairs := strings.Split(value, ",")
+	headers := make(map[string]string)
+
+	for _, header := range headersPairs {
+		n, v, found := strings.Cut(header, "=")
+		if !found {
+			global.Error(errors.New("missing '='"), "parse headers", "input", header)
+			continue
+		}
+		name, err := url.PathUnescape(n)
+		if err != nil {
+			global.Error(err, "escape header key", "key", n)
+			continue
+		}
+		trimmedName := strings.TrimSpace(name)
+		value, err := url.PathUnescape(v)
+		if err != nil {
+			global.Error(err, "escape header value", "value", v)
+			continue
+		}
+		trimmedValue := strings.TrimSpace(value)
+
+		headers[trimmedName] = trimmedValue
+	}
+
+	return headers
+}
+
+func createCertPool(certBytes []byte) (*x509.CertPool, error) {
+	cp := x509.NewCertPool()
+	if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+		return nil, errors.New("failed to append certificate to the cert pool")
+	}
+	return cp, nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go
new file mode 100644
index 0000000000000000000000000000000000000000..01347d8c651ced52093697e301f635907a3ab926
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go
@@ -0,0 +1,35 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal"
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig\"}" --out=otlpconfig/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry\"}" --out=otlpconfig/options.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig\"}" --out=otlpconfig/options_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl "--data={}" --out=otlpconfig/optiontypes.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl "--data={}" --out=otlpconfig/tls.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl "--data={}" --out=otlptracetest/client.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl "--data={}" --out=otlptracetest/collector.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl "--data={}" --out=otlptracetest/data.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl "--data={}" --out=otlptracetest/otlptest.go
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go
new file mode 100644
index 0000000000000000000000000000000000000000..45f137a7872afdf737416544fb6c56c30fde5e1e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go
@@ -0,0 +1,153 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"net/url"
+	"os"
+	"path"
+	"strings"
+	"time"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig"
+)
+
+// DefaultEnvOptionsReader is the default environment reader.
+var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
+	GetEnv:    os.Getenv,
+	ReadFile:  os.ReadFile,
+	Namespace: "OTEL_EXPORTER_OTLP",
+}
+
+// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
+func ApplyGRPCEnvConfigs(cfg Config) Config {
+	opts := getOptionsFromEnv()
+	for _, opt := range opts {
+		cfg = opt.ApplyGRPCOption(cfg)
+	}
+	return cfg
+}
+
+// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
+func ApplyHTTPEnvConfigs(cfg Config) Config {
+	opts := getOptionsFromEnv()
+	for _, opt := range opts {
+		cfg = opt.ApplyHTTPOption(cfg)
+	}
+	return cfg
+}
+
+func getOptionsFromEnv() []GenericOption {
+	opts := []GenericOption{}
+
+	tlsConf := &tls.Config{}
+	DefaultEnvOptionsReader.Apply(
+		envconfig.WithURL("ENDPOINT", func(u *url.URL) {
+			opts = append(opts, withEndpointScheme(u))
+			opts = append(opts, newSplitOption(func(cfg Config) Config {
+				cfg.Traces.Endpoint = u.Host
+				// For OTLP/HTTP endpoint URLs without a per-signal
+				// configuration, the passed endpoint is used as a base URL
+				// and the signals are sent to these paths relative to that.
+				cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath)
+				return cfg
+			}, withEndpointForGRPC(u)))
+		}),
+		envconfig.WithURL("TRACES_ENDPOINT", func(u *url.URL) {
+			opts = append(opts, withEndpointScheme(u))
+			opts = append(opts, newSplitOption(func(cfg Config) Config {
+				cfg.Traces.Endpoint = u.Host
+				// For endpoint URLs for OTLP/HTTP per-signal variables, the
+				// URL MUST be used as-is without any modification. The only
+				// exception is that if a URL contains no path part, the root
+				// path / MUST be used.
+				path := u.Path
+				if path == "" {
+					path = "/"
+				}
+				cfg.Traces.URLPath = path
+				return cfg
+			}, withEndpointForGRPC(u)))
+		}),
+		envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+		envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+		envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+		envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+		withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
+		envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+		envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+		envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+		envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+		WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+		WithEnvCompression("TRACES_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+		envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+		envconfig.WithDuration("TRACES_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+	)
+
+	return opts
+}
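+
+// Illustrative mapping (editor's sketch, not upstream code; the host is
+// hypothetical): with
+//
+//	OTEL_EXPORTER_OTLP_ENDPOINT=https://collector.example:4318/base
+//
+// the base-URL option above yields Traces.Endpoint "collector.example:4318"
+// and Traces.URLPath "/base/v1/traces", whereas the per-signal
+//
+//	OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=https://collector.example:4318/base
+//
+// uses the path as-is, yielding Traces.URLPath "/base".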
+
+func withEndpointScheme(u *url.URL) GenericOption {
+	switch strings.ToLower(u.Scheme) {
+	case "http", "unix":
+		return WithInsecure()
+	default:
+		return WithSecure()
+	}
+}
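+
+// For example (editor's note): "http://collector:4318" and
+// "unix:///run/otelcol.sock" select WithInsecure, while "https://collector:4318"
+// (or any other scheme) selects WithSecure.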
+
+func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
+	return func(cfg Config) Config {
+		// For OTLP/gRPC endpoints, this is the target to which the
+		// exporter is going to send telemetry.
+		cfg.Traces.Endpoint = path.Join(u.Host, u.Path)
+		return cfg
+	}
+}
+
+// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
+func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
+	return func(e *envconfig.EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			cp := NoCompression
+			if v == "gzip" {
+				cp = GzipCompression
+			}
+
+			fn(cp)
+		}
+	}
+}
+
+// revive:disable-next-line:flag-parameter
+func withInsecure(b bool) GenericOption {
+	if b {
+		return WithInsecure()
+	}
+	return WithSecure()
+}
+
+func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
+	return func(e *envconfig.EnvOptionsReader) {
+		if c.RootCAs != nil || len(c.Certificates) > 0 {
+			fn(c)
+		}
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go
new file mode 100644
index 0000000000000000000000000000000000000000..3b81641a764c7a901ded90f8494661bf82d1a746
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go
@@ -0,0 +1,345 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
+
+import (
+	"crypto/tls"
+	"fmt"
+	"net/url"
+	"path"
+	"strings"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/backoff"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/credentials/insecure"
+	"google.golang.org/grpc/encoding/gzip"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry"
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+const (
+	// DefaultTracesPath is the default URL path for the endpoint that
+	// receives spans.
+	DefaultTracesPath string = "/v1/traces"
+	// DefaultTimeout is a default max waiting time for the backend to process
+	// each span batch.
+	DefaultTimeout time.Duration = 10 * time.Second
+)
+
+type (
+	SignalConfig struct {
+		Endpoint    string
+		Insecure    bool
+		TLSCfg      *tls.Config
+		Headers     map[string]string
+		Compression Compression
+		Timeout     time.Duration
+		URLPath     string
+
+		// gRPC configurations
+		GRPCCredentials credentials.TransportCredentials
+	}
+
+	Config struct {
+		// Signal specific configurations
+		Traces SignalConfig
+
+		RetryConfig retry.Config
+
+		// gRPC configurations
+		ReconnectionPeriod time.Duration
+		ServiceConfig      string
+		DialOptions        []grpc.DialOption
+		GRPCConn           *grpc.ClientConn
+	}
+)
+
+// NewHTTPConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default HTTP config values.
+func NewHTTPConfig(opts ...HTTPOption) Config {
+	cfg := Config{
+		Traces: SignalConfig{
+			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
+			URLPath:     DefaultTracesPath,
+			Compression: NoCompression,
+			Timeout:     DefaultTimeout,
+		},
+		RetryConfig: retry.DefaultConfig,
+	}
+	cfg = ApplyHTTPEnvConfigs(cfg)
+	for _, opt := range opts {
+		cfg = opt.ApplyHTTPOption(cfg)
+	}
+	cfg.Traces.URLPath = cleanPath(cfg.Traces.URLPath, DefaultTracesPath)
+	return cfg
+}
+
+// cleanPath returns a path with all spaces trimmed and all redundancies
+// removed. If urlPath is empty or cleaning it results in an empty string,
+// defaultPath is returned instead.
+func cleanPath(urlPath string, defaultPath string) string {
+	tmp := path.Clean(strings.TrimSpace(urlPath))
+	if tmp == "." {
+		return defaultPath
+	}
+	if !path.IsAbs(tmp) {
+		tmp = fmt.Sprintf("/%s", tmp)
+	}
+	return tmp
+}
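+
+// For illustration (editor's note): cleanPath("", "/v1/traces") and
+// cleanPath("   ", "/v1/traces") both return "/v1/traces" (path.Clean of an
+// empty string is "."); cleanPath("v1/custom", "/v1/traces") returns
+// "/v1/custom" after the absolute-path fixup; and cleanPath("/a/b/../c/",
+// "/v1/traces") returns "/a/c".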
+
+// NewGRPCConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default gRPC config values.
+func NewGRPCConfig(opts ...GRPCOption) Config {
+	userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version()
+	cfg := Config{
+		Traces: SignalConfig{
+			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
+			URLPath:     DefaultTracesPath,
+			Compression: NoCompression,
+			Timeout:     DefaultTimeout,
+		},
+		RetryConfig: retry.DefaultConfig,
+		DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)},
+	}
+	cfg = ApplyGRPCEnvConfigs(cfg)
+	for _, opt := range opts {
+		cfg = opt.ApplyGRPCOption(cfg)
+	}
+
+	if cfg.ServiceConfig != "" {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
+	}
+	// Prioritize GRPCCredentials over Insecure (passing both is an error).
+	if cfg.Traces.GRPCCredentials != nil {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials))
+	} else if cfg.Traces.Insecure {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	} else {
+		// Default to using the host's root CA.
+		creds := credentials.NewTLS(nil)
+		cfg.Traces.GRPCCredentials = creds
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
+	}
+	if cfg.Traces.Compression == GzipCompression {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
+	}
+	if cfg.ReconnectionPeriod != 0 {
+		p := grpc.ConnectParams{
+			Backoff:           backoff.DefaultConfig,
+			MinConnectTimeout: cfg.ReconnectionPeriod,
+		}
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
+	}
+
+	return cfg
+}
+
+type (
+	// GenericOption applies an option to the HTTP or gRPC driver.
+	GenericOption interface {
+		ApplyHTTPOption(Config) Config
+		ApplyGRPCOption(Config) Config
+
+		// A private method to prevent users from implementing the
+		// interface, so future additions to it will not violate
+		// compatibility.
+		private()
+	}
+
+	// HTTPOption applies an option to the HTTP driver.
+	HTTPOption interface {
+		ApplyHTTPOption(Config) Config
+
+		// A private method to prevent users from implementing the
+		// interface, so future additions to it will not violate
+		// compatibility.
+		private()
+	}
+
+	// GRPCOption applies an option to the gRPC driver.
+	GRPCOption interface {
+		ApplyGRPCOption(Config) Config
+
+		// A private method to prevent users from implementing the
+		// interface, so future additions to it will not violate
+		// compatibility.
+		private()
+	}
+)
+
+// genericOption is an option that applies the same logic
+// for both gRPC and HTTP.
+type genericOption struct {
+	fn func(Config) Config
+}
+
+func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
+	return g.fn(cfg)
+}
+
+func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
+	return g.fn(cfg)
+}
+
+func (genericOption) private() {}
+
+func newGenericOption(fn func(cfg Config) Config) GenericOption {
+	return &genericOption{fn: fn}
+}
+
+// splitOption is an option that applies different logics
+// for gRPC and HTTP.
+type splitOption struct {
+	httpFn func(Config) Config
+	grpcFn func(Config) Config
+}
+
+func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
+	return g.grpcFn(cfg)
+}
+
+func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
+	return g.httpFn(cfg)
+}
+
+func (splitOption) private() {}
+
+func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
+	return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
+}
+
+// httpOption is an option that is only applied to the HTTP driver.
+type httpOption struct {
+	fn func(Config) Config
+}
+
+func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
+	return h.fn(cfg)
+}
+
+func (httpOption) private() {}
+
+func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
+	return &httpOption{fn: fn}
+}
+
+// grpcOption is an option that is only applied to the gRPC driver.
+type grpcOption struct {
+	fn func(Config) Config
+}
+
+func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
+	return h.fn(cfg)
+}
+
+func (grpcOption) private() {}
+
+func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
+	return &grpcOption{fn: fn}
+}
+
+// Generic Options
+
+func WithEndpoint(endpoint string) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Traces.Endpoint = endpoint
+		return cfg
+	})
+}
+
+func WithEndpointURL(v string) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		u, err := url.Parse(v)
+		if err != nil {
+			global.Error(err, "otlptrace: parse endpoint url", "url", v)
+			return cfg
+		}
+
+		cfg.Traces.Endpoint = u.Host
+		cfg.Traces.URLPath = u.Path
+		if u.Scheme != "https" {
+			cfg.Traces.Insecure = true
+		}
+
+		return cfg
+	})
+}
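+
+// As an illustration (editor's sketch; the host is hypothetical):
+// WithEndpointURL("http://collector:4318/custom") sets Traces.Endpoint to
+// "collector:4318", Traces.URLPath to "/custom", and, because the scheme is
+// not "https", Traces.Insecure to true.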
+
+func WithCompression(compression Compression) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Traces.Compression = compression
+		return cfg
+	})
+}
+
+func WithURLPath(urlPath string) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Traces.URLPath = urlPath
+		return cfg
+	})
+}
+
+func WithRetry(rc retry.Config) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.RetryConfig = rc
+		return cfg
+	})
+}
+
+func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
+	return newSplitOption(func(cfg Config) Config {
+		cfg.Traces.TLSCfg = tlsCfg.Clone()
+		return cfg
+	}, func(cfg Config) Config {
+		cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg)
+		return cfg
+	})
+}
+
+func WithInsecure() GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Traces.Insecure = true
+		return cfg
+	})
+}
+
+func WithSecure() GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Traces.Insecure = false
+		return cfg
+	})
+}
+
+func WithHeaders(headers map[string]string) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Traces.Headers = headers
+		return cfg
+	})
+}
+
+func WithTimeout(duration time.Duration) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Traces.Timeout = duration
+		return cfg
+	})
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go
new file mode 100644
index 0000000000000000000000000000000000000000..8625674855d1726cbff4c9acb2ba89a48898398b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go
@@ -0,0 +1,51 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
+
+const (
+	// DefaultCollectorGRPCPort is the default gRPC port of the collector.
+	DefaultCollectorGRPCPort uint16 = 4317
+	// DefaultCollectorHTTPPort is the default HTTP port of the collector.
+	DefaultCollectorHTTPPort uint16 = 4318
+	// DefaultCollectorHost is the host address the Exporter will attempt
+	// to connect to if no collector address is provided.
+	DefaultCollectorHost string = "localhost"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression int
+
+const (
+	// NoCompression tells the driver to send payloads without
+	// compression.
+	NoCompression Compression = iota
+	// GzipCompression tells the driver to send payloads after
+	// compressing them with gzip.
+	GzipCompression
+)
+
+// Marshaler describes the kind of message format sent to the collector.
+type Marshaler int
+
+const (
+	// MarshalProto tells the driver to send using the protobuf binary format.
+	MarshalProto Marshaler = iota
+	// MarshalJSON tells the driver to send using JSON format.
+	MarshalJSON
+)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go
new file mode 100644
index 0000000000000000000000000000000000000000..c342f7d683105b9656f0152ca23c2ed307afede4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go
@@ -0,0 +1,37 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+)
+
+// CreateTLSConfig creates a tls.Config from a raw certificate bytes
+// to verify a server certificate.
+func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
+	cp := x509.NewCertPool()
+	if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+		return nil, errors.New("failed to append certificate to the cert pool")
+	}
+
+	return &tls.Config{
+		RootCAs: cp,
+	}, nil
+}
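+
+// Illustrative use (editor's sketch; the certificate path is hypothetical):
+//
+//	pem, err := os.ReadFile("/etc/otel/ca.pem")
+//	if err != nil {
+//		return err
+//	}
+//	tlsCfg, err := CreateTLSConfig(pem)
+//	if err != nil {
+//		return err
+//	}
+//	// tlsCfg.RootCAs now verifies servers signed by the supplied CA.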
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go
new file mode 100644
index 0000000000000000000000000000000000000000..f051ad5d95cd3b36d4153b26bedb9850e129b71f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go
@@ -0,0 +1,67 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/partialsuccess.go
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal"
+
+import "fmt"
+
+// PartialSuccess represents the underlying error for all handling of
+// OTLP partial success messages.  Use `errors.Is(err,
+// PartialSuccess{})` to test whether an error passed to the OTel
+// error handler belongs to this category.
+type PartialSuccess struct {
+	ErrorMessage  string
+	RejectedItems int64
+	RejectedKind  string
+}
+
+var _ error = PartialSuccess{}
+
+// Error implements the error interface.
+func (ps PartialSuccess) Error() string {
+	msg := ps.ErrorMessage
+	if msg == "" {
+		msg = "empty message"
+	}
+	return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
+}
+
+// Is supports the errors.Is() interface.
+func (ps PartialSuccess) Is(err error) bool {
+	_, ok := err.(PartialSuccess)
+	return ok
+}
+
+// TracePartialSuccessError returns an error describing a partial success
+// response for the trace signal.
+func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
+	return PartialSuccess{
+		ErrorMessage:  errorMessage,
+		RejectedItems: itemsRejected,
+		RejectedKind:  "spans",
+	}
+}
+
+// MetricPartialSuccessError returns an error describing a partial success
+// response for the metric signal.
+func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
+	return PartialSuccess{
+		ErrorMessage:  errorMessage,
+		RejectedItems: itemsRejected,
+		RejectedKind:  "metric data points",
+	}
+}
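+
+// Illustrative check (editor's sketch): as the PartialSuccess doc comment
+// notes, callers can detect this error category with errors.Is:
+//
+//	if errors.Is(err, PartialSuccess{}) {
+//		// A partial success response; some items were rejected.
+//	}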
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go
new file mode 100644
index 0000000000000000000000000000000000000000..44974ff49bd74ff5afd48a2f45860f7d427647fc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go
@@ -0,0 +1,156 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/retry/retry.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package retry provides request retry functionality that can perform
+// configurable exponential backoff for transient errors and honor any
+// explicit throttle responses received.
+package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry"
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/cenkalti/backoff/v4"
+)
+
+// DefaultConfig are the recommended defaults to use.
+var DefaultConfig = Config{
+	Enabled:         true,
+	InitialInterval: 5 * time.Second,
+	MaxInterval:     30 * time.Second,
+	MaxElapsedTime:  time.Minute,
+}
+
+// Config defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type Config struct {
+	// Enabled indicates whether to retry sending batches in case of
+	// export failure.
+	Enabled bool
+	// InitialInterval the time to wait after the first failure before
+	// retrying.
+	InitialInterval time.Duration
+	// MaxInterval is the upper bound on backoff interval. Once this value is
+	// reached the delay between consecutive retries will always be
+	// `MaxInterval`.
+	MaxInterval time.Duration
+	// MaxElapsedTime is the maximum amount of time (including retries) spent
+	// trying to send a request/batch.  Once this value is reached, the data
+	// is discarded.
+	MaxElapsedTime time.Duration
+}
+
+// RequestFunc wraps a request with retry logic.
+type RequestFunc func(context.Context, func(context.Context) error) error
+
+// EvaluateFunc returns whether an error is retry-able and whether an
+// explicit throttle duration included in the error should be honored.
+//
+// The function must return true if the error argument is retry-able,
+// otherwise it must return false for the first return parameter.
+//
+// The function must return a non-zero time.Duration if the error contains
+// explicit throttle duration that should be honored, otherwise it must return
+// a zero valued time.Duration.
+type EvaluateFunc func(error) (bool, time.Duration)
+
+// RequestFunc returns a RequestFunc that uses the evaluate function to
+// determine if requests can be retried, following the exponential backoff
+// configuration of c.
+func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
+	if !c.Enabled {
+		return func(ctx context.Context, fn func(context.Context) error) error {
+			return fn(ctx)
+		}
+	}
+
+	return func(ctx context.Context, fn func(context.Context) error) error {
+		// Do not use NewExponentialBackOff since it calls Reset and the code here
+		// must call Reset after changing the InitialInterval (this saves an
+		// unnecessary call to Now).
+		b := &backoff.ExponentialBackOff{
+			InitialInterval:     c.InitialInterval,
+			RandomizationFactor: backoff.DefaultRandomizationFactor,
+			Multiplier:          backoff.DefaultMultiplier,
+			MaxInterval:         c.MaxInterval,
+			MaxElapsedTime:      c.MaxElapsedTime,
+			Stop:                backoff.Stop,
+			Clock:               backoff.SystemClock,
+		}
+		b.Reset()
+
+		for {
+			err := fn(ctx)
+			if err == nil {
+				return nil
+			}
+
+			retryable, throttle := evaluate(err)
+			if !retryable {
+				return err
+			}
+
+			bOff := b.NextBackOff()
+			if bOff == backoff.Stop {
+				return fmt.Errorf("max retry time elapsed: %w", err)
+			}
+
+			// Wait for the greater of the backoff or throttle delay.
+			var delay time.Duration
+			if bOff > throttle {
+				delay = bOff
+			} else {
+				elapsed := b.GetElapsedTime()
+				if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
+					return fmt.Errorf("max retry time would elapse: %w", err)
+				}
+				delay = throttle
+			}
+
+			if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
+				return fmt.Errorf("%w: %s", ctxErr, err)
+			}
+		}
+	}
+}
+
+// Allow override for testing.
+var waitFunc = wait
+
+// wait takes the caller's context, and the amount of time to wait.  It will
+// return nil if the timer fires before or at the same time as the context's
+// deadline.  This indicates that the call can be retried.
+func wait(ctx context.Context, delay time.Duration) error {
+	timer := time.NewTimer(delay)
+	defer timer.Stop()
+
+	select {
+	case <-ctx.Done():
+		// Handle the case where the timer and context deadline end
+		// simultaneously by prioritizing the timer expiration nil value
+		// response.
+		select {
+		case <-timer.C:
+		default:
+			return ctx.Err()
+		}
+	case <-timer.C:
+	}
+
+	return nil
+}
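+
+// Illustrative wiring (editor's sketch; send is a hypothetical export
+// attempt):
+//
+//	req := DefaultConfig.RequestFunc(func(err error) (bool, time.Duration) {
+//		// Treat every error as retryable, with no explicit throttle hint.
+//		return true, 0
+//	})
+//	err := req(ctx, func(ctx context.Context) error {
+//		return send(ctx)
+//	})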
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go
new file mode 100644
index 0000000000000000000000000000000000000000..ea6b767a27f646b7e47aec75555d3f3c54e1462e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go
@@ -0,0 +1,145 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
+
+import (
+	"crypto/tls"
+	"time"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression otlpconfig.Compression
+
+const (
+	// NoCompression tells the driver to send payloads without
+	// compression.
+	NoCompression = Compression(otlpconfig.NoCompression)
+	// GzipCompression tells the driver to send payloads after
+	// compressing them with gzip.
+	GzipCompression = Compression(otlpconfig.GzipCompression)
+)
+
+// Option applies an option to the HTTP client.
+type Option interface {
+	applyHTTPOption(otlpconfig.Config) otlpconfig.Config
+}
+
+func asHTTPOptions(opts []Option) []otlpconfig.HTTPOption {
+	converted := make([]otlpconfig.HTTPOption, len(opts))
+	for i, o := range opts {
+		converted[i] = otlpconfig.NewHTTPOption(o.applyHTTPOption)
+	}
+	return converted
+}
+
+// RetryConfig defines configuration for retrying batches in case of export
+// failure using an exponential backoff.
+type RetryConfig retry.Config
+
+type wrappedOption struct {
+	otlpconfig.HTTPOption
+}
+
+func (w wrappedOption) applyHTTPOption(cfg otlpconfig.Config) otlpconfig.Config {
+	return w.ApplyHTTPOption(cfg)
+}
+
+// WithEndpoint sets the target endpoint (host and port) the Exporter will
+// connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both environment variables are set,
+// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence.
+//
+// If both this option and WithEndpointURL are used, the last used option will
+// take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4318" will be used.
+func WithEndpoint(endpoint string) Option {
+	return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
+}
+
+// WithEndpointURL sets the target endpoint URL the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both environment variables are set,
+// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence.
+//
+// If both this option and WithEndpoint are used, the last used option will
+// take precedence.
+//
+// If an invalid URL is provided, the default value will be kept.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4318" will be used.
+func WithEndpointURL(u string) Option {
+	return wrappedOption{otlpconfig.WithEndpointURL(u)}
+}
+
+// WithCompression tells the driver to compress the sent data.
+func WithCompression(compression Compression) Option {
+	return wrappedOption{otlpconfig.WithCompression(otlpconfig.Compression(compression))}
+}
+
+// WithURLPath allows one to override the default URL path used
+// for sending traces. If unset, the default ("/v1/traces") will be used.
+func WithURLPath(urlPath string) Option {
+	return wrappedOption{otlpconfig.WithURLPath(urlPath)}
+}
+
+// WithTLSClientConfig can be used to set up a custom TLS
+// configuration for the client used to send payloads to the
+// collector. Use it if you want to use a custom certificate.
+func WithTLSClientConfig(tlsCfg *tls.Config) Option {
+	return wrappedOption{otlpconfig.WithTLSClientConfig(tlsCfg)}
+}
+
+// WithInsecure tells the driver to connect to the collector using the
+// HTTP scheme, instead of HTTPS.
+func WithInsecure() Option {
+	return wrappedOption{otlpconfig.WithInsecure()}
+}
+
+// WithHeaders allows one to tell the driver to send additional HTTP
+// headers with the payloads. Specifying headers like Content-Length,
+// Content-Encoding and Content-Type may result in a broken driver.
+func WithHeaders(headers map[string]string) Option {
+	return wrappedOption{otlpconfig.WithHeaders(headers)}
+}
+
+// WithTimeout tells the driver the max waiting time for the backend to process
+// each span batch.  If unset, the default will be 10 seconds.
+func WithTimeout(duration time.Duration) Option {
+	return wrappedOption{otlpconfig.WithTimeout(duration)}
+}
+
+// WithRetry configures the retry policy for transient errors that may occur
+// when exporting traces. An exponential back-off algorithm is used to ensure
+// endpoints are not overwhelmed with retries. If unset, the default retry
+// policy will retry after 5 seconds and increase exponentially after each
+// error for a total of 1 minute.
+func WithRetry(rc RetryConfig) Option {
+	return wrappedOption{otlpconfig.WithRetry(retry.Config(rc))}
+}
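+
+// Putting the options together (editor's sketch, assuming the package's New
+// constructor; the collector address is hypothetical):
+//
+//	exp, err := otlptracehttp.New(ctx,
+//		otlptracehttp.WithEndpoint("collector:4318"),
+//		otlptracehttp.WithURLPath("/v1/traces"),
+//		otlptracehttp.WithCompression(otlptracehttp.GzipCompression),
+//		otlptracehttp.WithTimeout(30*time.Second),
+//	)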
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
new file mode 100644
index 0000000000000000000000000000000000000000..54cda3a16a2c8875697384f85354e52787f59dbb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+
+// Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
+func Version() string {
+	return "1.23.1"
+}
diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
new file mode 100644
index 0000000000000000000000000000000000000000..9a58fb1d372ea730dd388e7ae812e42b8b03a3d8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euo pipefail
+
+top_dir='.'
+if [[ $# -gt 0 ]]; then
+    top_dir="${1}"
+fi
+
+p=$(pwd)
+mod_dirs=()
+
+# Note `mapfile` does not exist in older bash versions:
+# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash
+
+while IFS= read -r line; do
+    mod_dirs+=("$line")
+done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort)
+
+for mod_dir in "${mod_dirs[@]}"; do
+    cd "${mod_dir}"
+
+    while IFS= read -r line; do
+        echo ".${line#${p}}"
+    done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|')
+    cd "${p}"
+done
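+
+# Illustrative invocation (editor's note): running `./get_main_pkgs.sh .`
+# prints the directory of every main package found under the current tree,
+# one per line, with the working-directory prefix stripped.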
diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go
new file mode 100644
index 0000000000000000000000000000000000000000..4115fe3bbb5af0d87dbb23679477df5aa0d22fa9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/handler.go
@@ -0,0 +1,48 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+var (
+	// Compile-time check that global.ErrDelegator implements ErrorHandler.
+	_ ErrorHandler = (*global.ErrDelegator)(nil)
+	// Compile-time check that global.ErrLogger implements ErrorHandler.
+	_ ErrorHandler = (*global.ErrLogger)(nil)
+)
+
+// GetErrorHandler returns the global ErrorHandler instance.
+//
+// The default ErrorHandler instance returned will log all errors to STDERR
+// until an override ErrorHandler is set with SetErrorHandler. All
+// ErrorHandler returned prior to this will automatically forward errors to
+// the set instance instead of logging.
+//
+// Subsequent calls to SetErrorHandler after the first will not forward errors
+// to the new ErrorHandler for prior returned instances.
+func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() }
+
+// SetErrorHandler sets the global ErrorHandler to h.
+//
+// The first time this is called all ErrorHandler previously returned from
+// GetErrorHandler will send errors to h instead of the default logging
+// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
+// delegate errors to h.
+func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) }
+
+// Handle is a convenience function for ErrorHandler().Handle(err).
+func Handle(err error) { global.Handle(err) }
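+
+// Minimal sketch (editor's addition; funcHandler is hypothetical): any type
+// with a Handle(error) method satisfies ErrorHandler, so errors can be
+// rerouted, e.g.:
+//
+//	type funcHandler func(error)
+//
+//	func (f funcHandler) Handle(err error) { f(err) }
+//
+//	otel.SetErrorHandler(funcHandler(func(err error) {
+//		log.Printf("otel: %v", err)
+//	}))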
diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
new file mode 100644
index 0000000000000000000000000000000000000000..622c3ee3f276bf77f2ab8eaf69e204d1d4a15ae4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
@@ -0,0 +1,111 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package attribute provides several helper functions for commonly used
+logic of processing attributes.
+*/
+package attribute // import "go.opentelemetry.io/otel/internal/attribute"
+
+import (
+	"reflect"
+)
+
+// BoolSliceValue converts a bool slice into an array with the same elements as the slice.
+func BoolSliceValue(v []bool) interface{} {
+	var zero bool
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v)
+	return cp.Elem().Interface()
+}
+
+// Int64SliceValue converts an int64 slice into an array with the same elements as the slice.
+func Int64SliceValue(v []int64) interface{} {
+	var zero int64
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v)
+	return cp.Elem().Interface()
+}
+
+// Float64SliceValue converts a float64 slice into an array with the same elements as the slice.
+func Float64SliceValue(v []float64) interface{} {
+	var zero float64
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v)
+	return cp.Elem().Interface()
+}
+
+// StringSliceValue converts a string slice into an array with the same elements as the slice.
+func StringSliceValue(v []string) interface{} {
+	var zero string
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v)
+	return cp.Elem().Interface()
+}
+
+// AsBoolSlice converts a bool array into a slice with the same elements as the array.
+func AsBoolSlice(v interface{}) []bool {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero bool
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]bool)
+}
+
+// AsInt64Slice converts an int64 array into a slice with the same elements as the array.
+func AsInt64Slice(v interface{}) []int64 {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero int64
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]int64)
+}
+
+// AsFloat64Slice converts a float64 array into a slice with the same elements as the array.
+func AsFloat64Slice(v interface{}) []float64 {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero float64
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]float64)
+}
+
+// AsStringSlice converts a string array into a slice with the same elements as the array.
+func AsStringSlice(v interface{}) []string {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero string
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]string)
+}
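+
+// Round-trip illustration (editor's sketch): the *SliceValue helpers produce
+// array-backed values, and the As*Slice helpers recover the slice form:
+//
+//	v := BoolSliceValue([]bool{true, false}) // interface{} holding a [2]bool
+//	s := AsBoolSlice(v)                      // []bool{true, false}
+//
+// Arrays, unlike slices, are comparable in Go, which makes the converted
+// values usable where comparability is required.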
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
new file mode 100644
index 0000000000000000000000000000000000000000..b96e5408e69a8325f7bdd6f03f02b1d37ceedba3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
@@ -0,0 +1,43 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package baggage provides base types and functionality to store and retrieve
+baggage in Go context. This package exists because the OpenTracing bridge to
+OpenTelemetry needs to synchronize state whenever baggage for a context is
+modified and that context contains an OpenTracing span. If it were not for
+this need this package would not need to exist and the
+`go.opentelemetry.io/otel/baggage` package would be the singular place where
+W3C baggage is handled.
+*/
+package baggage // import "go.opentelemetry.io/otel/internal/baggage"
+
+// List is the collection of baggage members. The W3C allows for duplicates,
+// but OpenTelemetry does not; therefore, this is represented as a map.
+type List map[string]Item
+
+// Item is the value and metadata properties part of a list-member.
+type Item struct {
+	Value      string
+	Properties []Property
+}
+
+// Property is a metadata entry for a list-member.
+type Property struct {
+	Key, Value string
+
+	// HasValue indicates whether a zero Value means the property has no
+	// value or whether it was explicitly set to the zero value.
+	HasValue bool
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
new file mode 100644
index 0000000000000000000000000000000000000000..4469700d9cbb1d7198ab6466dd9f94a72c1a47d9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
@@ -0,0 +1,92 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package baggage // import "go.opentelemetry.io/otel/internal/baggage"
+
+import "context"
+
+type baggageContextKeyType int
+
+const baggageKey baggageContextKeyType = iota
+
+// SetHookFunc is a callback called when storing baggage in the context.
+type SetHookFunc func(context.Context, List) context.Context
+
+// GetHookFunc is a callback called when getting baggage from the context.
+type GetHookFunc func(context.Context, List) List
+
+type baggageState struct {
+	list List
+
+	setHook SetHookFunc
+	getHook GetHookFunc
+}
+
+// ContextWithSetHook returns a copy of parent with hook configured to be
+// invoked every time ContextWithList is called.
+//
+// Passing nil SetHookFunc creates a context with no set hook to call.
+func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context {
+	var s baggageState
+	if v, ok := parent.Value(baggageKey).(baggageState); ok {
+		s = v
+	}
+
+	s.setHook = hook
+	return context.WithValue(parent, baggageKey, s)
+}
+
+// ContextWithGetHook returns a copy of parent with hook configured to be
+// invoked every time ListFromContext is called.
+//
+// Passing nil GetHookFunc creates a context with no get hook to call.
+func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context {
+	var s baggageState
+	if v, ok := parent.Value(baggageKey).(baggageState); ok {
+		s = v
+	}
+
+	s.getHook = hook
+	return context.WithValue(parent, baggageKey, s)
+}
+
+// ContextWithList returns a copy of parent with baggage. Passing nil list
+// returns a context without any baggage.
+func ContextWithList(parent context.Context, list List) context.Context {
+	var s baggageState
+	if v, ok := parent.Value(baggageKey).(baggageState); ok {
+		s = v
+	}
+
+	s.list = list
+	ctx := context.WithValue(parent, baggageKey, s)
+	if s.setHook != nil {
+		ctx = s.setHook(ctx, list)
+	}
+
+	return ctx
+}
+
+// ListFromContext returns the baggage contained in ctx.
+func ListFromContext(ctx context.Context) List {
+	switch v := ctx.Value(baggageKey).(type) {
+	case baggageState:
+		if v.getHook != nil {
+			return v.getHook(ctx, v.list)
+		}
+		return v.list
+	default:
+		return nil
+	}
+}
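+
+// Illustrative flow (editor's sketch; the key and value are hypothetical),
+// with both hooks left unset:
+//
+//	ctx := ContextWithList(context.Background(), List{
+//		"user": {Value: "alice"},
+//	})
+//	l := ListFromContext(ctx) // returns the stored List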
diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go
new file mode 100644
index 0000000000000000000000000000000000000000..f532f07e9e529c324232406a2118ffc72bde35b8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/gen.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/internal"
+
+//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go
+//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go
+//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go
+
+//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go
+//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go
+//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go
+//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go
+//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go
+//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go
+//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go
+//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go
+//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go
new file mode 100644
index 0000000000000000000000000000000000000000..5e9b83047924a6943c14d61e2ebf3d35657d9dac
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go
@@ -0,0 +1,102 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"log"
+	"os"
+	"sync/atomic"
+)
+
+var (
+	// GlobalErrorHandler provides an ErrorHandler that can be used
+	// throughout an OpenTelemetry instrumented project. When a
+	// user-specified ErrorHandler is registered (`SetErrorHandler`), all
+	// calls to `Handle` will be delegated to the registered ErrorHandler.
+	GlobalErrorHandler = defaultErrorHandler()
+
+	// Compile-time check that delegator implements ErrorHandler.
+	_ ErrorHandler = (*ErrDelegator)(nil)
+	// Compile-time check that errLogger implements ErrorHandler.
+	_ ErrorHandler = (*ErrLogger)(nil)
+)
+
+// ErrorHandler handles irremediable events.
+type ErrorHandler interface {
+	// Handle handles any error deemed irremediable by an OpenTelemetry
+	// component.
+	Handle(error)
+}
+
+type ErrDelegator struct {
+	delegate atomic.Pointer[ErrorHandler]
+}
+
+func (d *ErrDelegator) Handle(err error) {
+	d.getDelegate().Handle(err)
+}
+
+func (d *ErrDelegator) getDelegate() ErrorHandler {
+	return *d.delegate.Load()
+}
+
+// setDelegate sets the ErrorHandler delegate.
+func (d *ErrDelegator) setDelegate(eh ErrorHandler) {
+	d.delegate.Store(&eh)
+}
+
+func defaultErrorHandler() *ErrDelegator {
+	d := &ErrDelegator{}
+	d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)})
+	return d
+}
+
+// ErrLogger logs errors if no delegate is set, otherwise they are delegated.
+type ErrLogger struct {
+	l *log.Logger
+}
+
+// Handle logs err if no delegate is set, otherwise it is delegated.
+func (h *ErrLogger) Handle(err error) {
+	h.l.Print(err)
+}
+
+// GetErrorHandler returns the global ErrorHandler instance.
+//
+// The default ErrorHandler instance returned will log all errors to STDERR
+// until an override ErrorHandler is set with SetErrorHandler. All
+// ErrorHandler returned prior to this will automatically forward errors to
+// the set instance instead of logging.
+//
+// Subsequent calls to SetErrorHandler after the first will not forward errors
+// to the new ErrorHandler for prior returned instances.
+func GetErrorHandler() ErrorHandler {
+	return GlobalErrorHandler
+}
+
+// SetErrorHandler sets the global ErrorHandler to h.
+//
+// The first time this is called all ErrorHandler previously returned from
+// GetErrorHandler will send errors to h instead of the default logging
+// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
+// delegate errors to h.
+func SetErrorHandler(h ErrorHandler) {
+	GlobalErrorHandler.setDelegate(h)
+}
+
+// Handle is a convenience function for ErrorHandler().Handle(err).
+func Handle(err error) {
+	GetErrorHandler().Handle(err)
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
new file mode 100644
index 0000000000000000000000000000000000000000..ebb13c20678e6dfdee6ba31714ef1b35af9737b6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
@@ -0,0 +1,371 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"context"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// unwrapper unwraps to return the underlying instrument implementation.
+type unwrapper interface {
+	Unwrap() metric.Observable
+}
+
+type afCounter struct {
+	embedded.Float64ObservableCounter
+	metric.Float64Observable
+
+	name string
+	opts []metric.Float64ObservableCounterOption
+
+	delegate atomic.Value // metric.Float64ObservableCounter
+}
+
+var (
+	_ unwrapper                       = (*afCounter)(nil)
+	_ metric.Float64ObservableCounter = (*afCounter)(nil)
+)
+
+func (i *afCounter) setDelegate(m metric.Meter) {
+	ctr, err := m.Float64ObservableCounter(i.name, i.opts...)
+	if err != nil {
+		GetErrorHandler().Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *afCounter) Unwrap() metric.Observable {
+	if ctr := i.delegate.Load(); ctr != nil {
+		return ctr.(metric.Float64ObservableCounter)
+	}
+	return nil
+}
+
+type afUpDownCounter struct {
+	embedded.Float64ObservableUpDownCounter
+	metric.Float64Observable
+
+	name string
+	opts []metric.Float64ObservableUpDownCounterOption
+
+	delegate atomic.Value // metric.Float64ObservableUpDownCounter
+}
+
+var (
+	_ unwrapper                             = (*afUpDownCounter)(nil)
+	_ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil)
+)
+
+func (i *afUpDownCounter) setDelegate(m metric.Meter) {
+	ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...)
+	if err != nil {
+		GetErrorHandler().Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *afUpDownCounter) Unwrap() metric.Observable {
+	if ctr := i.delegate.Load(); ctr != nil {
+		return ctr.(metric.Float64ObservableUpDownCounter)
+	}
+	return nil
+}
+
+type afGauge struct {
+	embedded.Float64ObservableGauge
+	metric.Float64Observable
+
+	name string
+	opts []metric.Float64ObservableGaugeOption
+
+	delegate atomic.Value // metric.Float64ObservableGauge
+}
+
+var (
+	_ unwrapper                     = (*afGauge)(nil)
+	_ metric.Float64ObservableGauge = (*afGauge)(nil)
+)
+
+func (i *afGauge) setDelegate(m metric.Meter) {
+	ctr, err := m.Float64ObservableGauge(i.name, i.opts...)
+	if err != nil {
+		GetErrorHandler().Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *afGauge) Unwrap() metric.Observable {
+	if ctr := i.delegate.Load(); ctr != nil {
+		return ctr.(metric.Float64ObservableGauge)
+	}
+	return nil
+}
+
+type aiCounter struct {
+	embedded.Int64ObservableCounter
+	metric.Int64Observable
+
+	name string
+	opts []metric.Int64ObservableCounterOption
+
+	delegate atomic.Value // metric.Int64ObservableCounter
+}
+
+var (
+	_ unwrapper                     = (*aiCounter)(nil)
+	_ metric.Int64ObservableCounter = (*aiCounter)(nil)
+)
+
+func (i *aiCounter) setDelegate(m metric.Meter) {
+	ctr, err := m.Int64ObservableCounter(i.name, i.opts...)
+	if err != nil {
+		GetErrorHandler().Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *aiCounter) Unwrap() metric.Observable {
+	if ctr := i.delegate.Load(); ctr != nil {
+		return ctr.(metric.Int64ObservableCounter)
+	}
+	return nil
+}
+
+type aiUpDownCounter struct {
+	embedded.Int64ObservableUpDownCounter
+	metric.Int64Observable
+
+	name string
+	opts []metric.Int64ObservableUpDownCounterOption
+
+	delegate atomic.Value // metric.Int64ObservableUpDownCounter
+}
+
+var (
+	_ unwrapper                           = (*aiUpDownCounter)(nil)
+	_ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil)
+)
+
+func (i *aiUpDownCounter) setDelegate(m metric.Meter) {
+	ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...)
+	if err != nil {
+		GetErrorHandler().Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *aiUpDownCounter) Unwrap() metric.Observable {
+	if ctr := i.delegate.Load(); ctr != nil {
+		return ctr.(metric.Int64ObservableUpDownCounter)
+	}
+	return nil
+}
+
+type aiGauge struct {
+	embedded.Int64ObservableGauge
+	metric.Int64Observable
+
+	name string
+	opts []metric.Int64ObservableGaugeOption
+
+	delegate atomic.Value // metric.Int64ObservableGauge
+}
+
+var (
+	_ unwrapper                   = (*aiGauge)(nil)
+	_ metric.Int64ObservableGauge = (*aiGauge)(nil)
+)
+
+func (i *aiGauge) setDelegate(m metric.Meter) {
+	ctr, err := m.Int64ObservableGauge(i.name, i.opts...)
+	if err != nil {
+		GetErrorHandler().Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *aiGauge) Unwrap() metric.Observable {
+	if ctr := i.delegate.Load(); ctr != nil {
+		return ctr.(metric.Int64ObservableGauge)
+	}
+	return nil
+}
+
+// Sync Instruments.
+type sfCounter struct {
+	embedded.Float64Counter
+
+	name string
+	opts []metric.Float64CounterOption
+
+	delegate atomic.Value // metric.Float64Counter
+}
+
+var _ metric.Float64Counter = (*sfCounter)(nil)
+
+func (i *sfCounter) setDelegate(m metric.Meter) {
+	ctr, err := m.Float64Counter(i.name, i.opts...)
+	if err != nil {
+		GetErrorHandler().Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) {
+	if ctr := i.delegate.Load(); ctr != nil {
+		ctr.(metric.Float64Counter).Add(ctx, incr, opts...)
+	}
+}
+
+type sfUpDownCounter struct {
+	embedded.Float64UpDownCounter
+
+	name string
+	opts []metric.Float64UpDownCounterOption
+
+	delegate atomic.Value // metric.Float64UpDownCounter
+}
+
+var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil)
+
+func (i *sfUpDownCounter) setDelegate(m metric.Meter) {
+	ctr, err := m.Float64UpDownCounter(i.name, i.opts...)
+	if err != nil {
+		GetErrorHandler().Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) {
+	if ctr := i.delegate.Load(); ctr != nil {
+		ctr.(metric.Float64UpDownCounter).Add(ctx, incr, opts...)
+	}
+}
+
+type sfHistogram struct {
+	embedded.Float64Histogram
+
+	name string
+	opts []metric.Float64HistogramOption
+
+	delegate atomic.Value // metric.Float64Histogram
+}
+
+var _ metric.Float64Histogram = (*sfHistogram)(nil)
+
+func (i *sfHistogram) setDelegate(m metric.Meter) {
+	ctr, err := m.Float64Histogram(i.name, i.opts...)
+	if err != nil {
+		GetErrorHandler().Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.RecordOption) {
+	if ctr := i.delegate.Load(); ctr != nil {
+		ctr.(metric.Float64Histogram).Record(ctx, x, opts...)
+	}
+}
+
+type siCounter struct {
+	embedded.Int64Counter
+
+	name string
+	opts []metric.Int64CounterOption
+
+	delegate atomic.Value // metric.Int64Counter
+}
+
+var _ metric.Int64Counter = (*siCounter)(nil)
+
+func (i *siCounter) setDelegate(m metric.Meter) {
+	ctr, err := m.Int64Counter(i.name, i.opts...)
+	if err != nil {
+		GetErrorHandler().Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) {
+	if ctr := i.delegate.Load(); ctr != nil {
+		ctr.(metric.Int64Counter).Add(ctx, x, opts...)
+	}
+}
+
+type siUpDownCounter struct {
+	embedded.Int64UpDownCounter
+
+	name string
+	opts []metric.Int64UpDownCounterOption
+
+	delegate atomic.Value // metric.Int64UpDownCounter
+}
+
+var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil)
+
+func (i *siUpDownCounter) setDelegate(m metric.Meter) {
+	ctr, err := m.Int64UpDownCounter(i.name, i.opts...)
+	if err != nil {
+		GetErrorHandler().Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) {
+	if ctr := i.delegate.Load(); ctr != nil {
+		ctr.(metric.Int64UpDownCounter).Add(ctx, x, opts...)
+	}
+}
+
+type siHistogram struct {
+	embedded.Int64Histogram
+
+	name string
+	opts []metric.Int64HistogramOption
+
+	delegate atomic.Value // metric.Int64Histogram
+}
+
+var _ metric.Int64Histogram = (*siHistogram)(nil)
+
+func (i *siHistogram) setDelegate(m metric.Meter) {
+	ctr, err := m.Int64Histogram(i.name, i.opts...)
+	if err != nil {
+		GetErrorHandler().Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.RecordOption) {
+	if ctr := i.delegate.Load(); ctr != nil {
+		ctr.(metric.Int64Histogram).Record(ctx, x, opts...)
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
new file mode 100644
index 0000000000000000000000000000000000000000..c6f305a2b76abfa3f02f2465dc1aefbf0e73069d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
@@ -0,0 +1,69 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"log"
+	"os"
+	"sync/atomic"
+
+	"github.com/go-logr/logr"
+	"github.com/go-logr/stdr"
+)
+
+// globalLogger is the logging interface used within the otel api and sdk to provide details of the internals.
+//
+// The default logger uses stdr which is backed by the standard `log.Logger`
+// interface. This logger will only show messages at the Error Level.
+var globalLogger atomic.Pointer[logr.Logger]
+
+func init() {
+	SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
+}
+
+// SetLogger overrides the globalLogger with l.
+//
+// To see Warn messages use a logger with `l.V(1).Enabled() == true`.
+// To see Info messages use a logger with `l.V(4).Enabled() == true`.
+// To see Debug messages use a logger with `l.V(8).Enabled() == true`.
+func SetLogger(l logr.Logger) {
+	globalLogger.Store(&l)
+}
+
+func getLogger() logr.Logger {
+	return *globalLogger.Load()
+}
+
+// Info prints messages about the general state of the API or SDK.
+// This should usually be less than 5 messages a minute.
+func Info(msg string, keysAndValues ...interface{}) {
+	getLogger().V(4).Info(msg, keysAndValues...)
+}
+
+// Error prints messages about exceptional states of the API or SDK.
+func Error(err error, msg string, keysAndValues ...interface{}) {
+	getLogger().Error(err, msg, keysAndValues...)
+}
+
+// Debug prints messages about all internal changes in the API or SDK.
+func Debug(msg string, keysAndValues ...interface{}) {
+	getLogger().V(8).Info(msg, keysAndValues...)
+}
+
+// Warn prints messages about warnings in the API or SDK.
+// Not an error but is likely more important than an informational event.
+func Warn(msg string, keysAndValues ...interface{}) {
+	getLogger().V(1).Info(msg, keysAndValues...)
+}
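
For illustration, a minimal sketch of how an application could surface the Warn/Info/Debug levels described above. It only uses `github.com/go-logr/stdr` (already a dependency of this module) and the public `otel.SetLogger` entry point; the sketch is illustrative, not part of the vendored file:

```go
package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"
	"go.opentelemetry.io/otel"
)

func main() {
	// stdr discards V(level) calls above its global verbosity. Raising it to
	// 8 makes otel's Debug (V(8)), Info (V(4)), and Warn (V(1)) messages
	// visible; the default of 0 effectively shows only Error messages.
	stdr.SetVerbosity(8)
	otel.SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
}
```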
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
new file mode 100644
index 0000000000000000000000000000000000000000..0097db478c6aa75d2983d8c052c741d9b4deb99a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
@@ -0,0 +1,354 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"container/list"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// meterProvider is a placeholder for a configured SDK MeterProvider.
+//
+// All MeterProvider functionality is forwarded to a delegate once
+// configured.
+type meterProvider struct {
+	embedded.MeterProvider
+
+	mtx    sync.Mutex
+	meters map[il]*meter
+
+	delegate metric.MeterProvider
+}
+
+// setDelegate configures p to delegate all MeterProvider functionality to
+// provider.
+//
+// All Meters provided prior to this function call are switched out to be
+// Meters provided by provider. All instruments and callbacks are recreated and
+// delegated.
+//
+// It is guaranteed by the caller that this happens only once.
+func (p *meterProvider) setDelegate(provider metric.MeterProvider) {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	p.delegate = provider
+
+	if len(p.meters) == 0 {
+		return
+	}
+
+	for _, meter := range p.meters {
+		meter.setDelegate(provider)
+	}
+
+	p.meters = nil
+}
+
+// Meter implements MeterProvider.
+func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	if p.delegate != nil {
+		return p.delegate.Meter(name, opts...)
+	}
+
+	// At this moment it is guaranteed that no sdk is installed, save the meter in the meters map.
+
+	c := metric.NewMeterConfig(opts...)
+	key := il{
+		name:    name,
+		version: c.InstrumentationVersion(),
+	}
+
+	if p.meters == nil {
+		p.meters = make(map[il]*meter)
+	}
+
+	if val, ok := p.meters[key]; ok {
+		return val
+	}
+
+	t := &meter{name: name, opts: opts}
+	p.meters[key] = t
+	return t
+}
+
+// meter is a placeholder for a metric.Meter.
+//
+// All Meter functionality is forwarded to a delegate once configured.
+// Otherwise, all functionality is forwarded to a NoopMeter.
+type meter struct {
+	embedded.Meter
+
+	name string
+	opts []metric.MeterOption
+
+	mtx         sync.Mutex
+	instruments []delegatedInstrument
+
+	registry list.List
+
+	delegate atomic.Value // metric.Meter
+}
+
+type delegatedInstrument interface {
+	setDelegate(metric.Meter)
+}
+
+// setDelegate configures m to delegate all Meter functionality to Meters
+// created by provider.
+//
+// All subsequent calls to the Meter methods will be passed to the delegate.
+//
+// It is guaranteed by the caller that this happens only once.
+func (m *meter) setDelegate(provider metric.MeterProvider) {
+	meter := provider.Meter(m.name, m.opts...)
+	m.delegate.Store(meter)
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	for _, inst := range m.instruments {
+		inst.setDelegate(meter)
+	}
+
+	for e := m.registry.Front(); e != nil; e = e.Next() {
+		r := e.Value.(*registration)
+		r.setDelegate(meter)
+		m.registry.Remove(e)
+	}
+
+	m.instruments = nil
+	m.registry.Init()
+}
+
+func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Int64Counter(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &siCounter{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Int64UpDownCounter(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &siUpDownCounter{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Int64Histogram(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &siHistogram{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Int64ObservableCounter(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &aiCounter{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Int64ObservableUpDownCounter(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &aiUpDownCounter{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Int64ObservableGauge(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &aiGauge{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Float64Counter(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &sfCounter{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Float64UpDownCounter(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &sfUpDownCounter{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Float64Histogram(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &sfHistogram{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Float64ObservableCounter(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &afCounter{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Float64ObservableUpDownCounter(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &afUpDownCounter{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Float64ObservableGauge(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &afGauge{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+// RegisterCallback captures the function that will be called during Collect.
+func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		insts = unwrapInstruments(insts)
+		return del.RegisterCallback(f, insts...)
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	reg := &registration{instruments: insts, function: f}
+	e := m.registry.PushBack(reg)
+	reg.unreg = func() error {
+		m.mtx.Lock()
+		_ = m.registry.Remove(e)
+		m.mtx.Unlock()
+		return nil
+	}
+	return reg, nil
+}
+
+// unwrapInstruments returns the underlying delegate for each wrapped global
+// instrument (any value implementing the unwrapper interface defined in
+// instrument.go) and passes every other instrument through unchanged.
+func unwrapInstruments(instruments []metric.Observable) []metric.Observable {
+	out := make([]metric.Observable, 0, len(instruments))
+
+	for _, inst := range instruments {
+		if in, ok := inst.(unwrapper); ok {
+			out = append(out, in.Unwrap())
+		} else {
+			out = append(out, inst)
+		}
+	}
+
+	return out
+}
+
+type registration struct {
+	embedded.Registration
+
+	instruments []metric.Observable
+	function    metric.Callback
+
+	unreg   func() error
+	unregMu sync.Mutex
+}
+
+func (c *registration) setDelegate(m metric.Meter) {
+	insts := unwrapInstruments(c.instruments)
+
+	c.unregMu.Lock()
+	defer c.unregMu.Unlock()
+
+	if c.unreg == nil {
+		// Unregister already called.
+		return
+	}
+
+	reg, err := m.RegisterCallback(c.function, insts...)
+	if err != nil {
+		GetErrorHandler().Handle(err)
+		// Keep the existing unreg func; reg is not valid here.
+		return
+	}
+
+	c.unreg = reg.Unregister
+}
+
+func (c *registration) Unregister() error {
+	c.unregMu.Lock()
+	defer c.unregMu.Unlock()
+	if c.unreg == nil {
+		// Unregister already called.
+		return nil
+	}
+
+	var err error
+	err, c.unreg = c.unreg(), nil
+	return err
+}
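
To make the delegation flow above concrete, here is a hedged sketch (it assumes the separate `go.opentelemetry.io/otel/sdk/metric` module is added as a dependency) showing that an instrument created before any SDK is installed is recreated against the SDK once `SetMeterProvider` runs:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	ctx := context.Background()

	// Created before any SDK is installed: this is the siCounter placeholder
	// from instrument.go, so Add is a no-op until a delegate exists.
	counter, err := otel.Meter("example").Int64Counter("requests")
	if err != nil {
		panic(err)
	}
	counter.Add(ctx, 1) // dropped: no delegate yet

	// setDelegate recreates the instrument against the SDK meter; the same
	// counter value now records through the SDK.
	otel.SetMeterProvider(sdkmetric.NewMeterProvider())
	counter.Add(ctx, 1) // recorded
}
```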
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
new file mode 100644
index 0000000000000000000000000000000000000000..06bac35c2fe59f212c52a10c300384cd1a95e810
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
@@ -0,0 +1,82 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/propagation"
+)
+
+// textMapPropagator is a default TextMapPropagator that delegates calls to a
+// registered delegate if one is set, otherwise it delegates the calls to the
+// default no-op propagation.TextMapPropagator.
+type textMapPropagator struct {
+	mtx      sync.Mutex
+	once     sync.Once
+	delegate propagation.TextMapPropagator
+	noop     propagation.TextMapPropagator
+}
+
+// Compile-time guarantee that textMapPropagator implements the
+// propagation.TextMapPropagator interface.
+var _ propagation.TextMapPropagator = (*textMapPropagator)(nil)
+
+func newTextMapPropagator() *textMapPropagator {
+	return &textMapPropagator{
+		noop: propagation.NewCompositeTextMapPropagator(),
+	}
+}
+
+// SetDelegate sets a delegate propagation.TextMapPropagator that all calls are
+// forwarded to. Delegation can only be performed once, all subsequent calls
+// perform no delegation.
+func (p *textMapPropagator) SetDelegate(delegate propagation.TextMapPropagator) {
+	if delegate == nil {
+		return
+	}
+
+	p.mtx.Lock()
+	p.once.Do(func() { p.delegate = delegate })
+	p.mtx.Unlock()
+}
+
+// effectiveDelegate returns the current delegate of p if one is set,
+// otherwise the default noop TextMapPropagator is returned. This method
+// can be called concurrently.
+func (p *textMapPropagator) effectiveDelegate() propagation.TextMapPropagator {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+	if p.delegate != nil {
+		return p.delegate
+	}
+	return p.noop
+}
+
+// Inject set cross-cutting concerns from the Context into the carrier.
+func (p *textMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) {
+	p.effectiveDelegate().Inject(ctx, carrier)
+}
+
+// Extract reads cross-cutting concerns from the carrier into a Context.
+func (p *textMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context {
+	return p.effectiveDelegate().Extract(ctx, carrier)
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (p *textMapPropagator) Fields() []string {
+	return p.effectiveDelegate().Fields()
+}
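
As a usage illustration of the delegation behavior (a sketch, using only the public `otel` and `propagation` APIs): before `SetTextMapPropagator` is called the global propagator forwards to the no-op composite, so `Fields` is empty and `Inject` writes nothing; afterwards all calls go to the registered delegate:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
)

func main() {
	// Register the W3C trace context propagator as the global delegate.
	otel.SetTextMapPropagator(propagation.TraceContext{})

	carrier := propagation.MapCarrier{}
	otel.GetTextMapPropagator().Inject(context.Background(), carrier)
	fmt.Println(otel.GetTextMapPropagator().Fields()) // [traceparent tracestate]
}
```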
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go
new file mode 100644
index 0000000000000000000000000000000000000000..7985005bcb6d510a9e8bf1873972cfd32e2be00e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go
@@ -0,0 +1,156 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"errors"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/propagation"
+	"go.opentelemetry.io/otel/trace"
+)
+
+type (
+	tracerProviderHolder struct {
+		tp trace.TracerProvider
+	}
+
+	propagatorsHolder struct {
+		tm propagation.TextMapPropagator
+	}
+
+	meterProviderHolder struct {
+		mp metric.MeterProvider
+	}
+)
+
+var (
+	globalTracer        = defaultTracerValue()
+	globalPropagators   = defaultPropagatorsValue()
+	globalMeterProvider = defaultMeterProvider()
+
+	delegateTraceOnce             sync.Once
+	delegateTextMapPropagatorOnce sync.Once
+	delegateMeterOnce             sync.Once
+)
+
+// TracerProvider is the internal implementation for global.TracerProvider.
+func TracerProvider() trace.TracerProvider {
+	return globalTracer.Load().(tracerProviderHolder).tp
+}
+
+// SetTracerProvider is the internal implementation for global.SetTracerProvider.
+func SetTracerProvider(tp trace.TracerProvider) {
+	current := TracerProvider()
+
+	if _, cOk := current.(*tracerProvider); cOk {
+		if _, tpOk := tp.(*tracerProvider); tpOk && current == tp {
+			// Do not assign the default delegating TracerProvider to delegate
+			// to itself.
+			Error(
+				errors.New("no delegate configured in tracer provider"),
+				"Setting tracer provider to it's current value. No delegate will be configured",
+			)
+			return
+		}
+	}
+
+	delegateTraceOnce.Do(func() {
+		if def, ok := current.(*tracerProvider); ok {
+			def.setDelegate(tp)
+		}
+	})
+	globalTracer.Store(tracerProviderHolder{tp: tp})
+}
+
+// TextMapPropagator is the internal implementation for global.TextMapPropagator.
+func TextMapPropagator() propagation.TextMapPropagator {
+	return globalPropagators.Load().(propagatorsHolder).tm
+}
+
+// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator.
+func SetTextMapPropagator(p propagation.TextMapPropagator) {
+	current := TextMapPropagator()
+
+	if _, cOk := current.(*textMapPropagator); cOk {
+		if _, pOk := p.(*textMapPropagator); pOk && current == p {
+			// Do not assign the default delegating TextMapPropagator to
+			// delegate to itself.
+			Error(
+				errors.New("no delegate configured in text map propagator"),
+				"Setting text map propagator to it's current value. No delegate will be configured",
+			)
+			return
+		}
+	}
+
+	// For the textMapPropagator already returned by TextMapPropagator
+	// delegate to p.
+	delegateTextMapPropagatorOnce.Do(func() {
+		if def, ok := current.(*textMapPropagator); ok {
+			def.SetDelegate(p)
+		}
+	})
+	// Return p when subsequent calls to TextMapPropagator are made.
+	globalPropagators.Store(propagatorsHolder{tm: p})
+}
+
+// MeterProvider is the internal implementation for global.MeterProvider.
+func MeterProvider() metric.MeterProvider {
+	return globalMeterProvider.Load().(meterProviderHolder).mp
+}
+
+// SetMeterProvider is the internal implementation for global.SetMeterProvider.
+func SetMeterProvider(mp metric.MeterProvider) {
+	current := MeterProvider()
+	if _, cOk := current.(*meterProvider); cOk {
+		if _, mpOk := mp.(*meterProvider); mpOk && current == mp {
+			// Do not assign the default delegating MeterProvider to delegate
+			// to itself.
+			Error(
+				errors.New("no delegate configured in meter provider"),
+				"Setting meter provider to it's current value. No delegate will be configured",
+			)
+			return
+		}
+	}
+
+	delegateMeterOnce.Do(func() {
+		if def, ok := current.(*meterProvider); ok {
+			def.setDelegate(mp)
+		}
+	})
+	globalMeterProvider.Store(meterProviderHolder{mp: mp})
+}
+
+func defaultTracerValue() *atomic.Value {
+	v := &atomic.Value{}
+	v.Store(tracerProviderHolder{tp: &tracerProvider{}})
+	return v
+}
+
+func defaultPropagatorsValue() *atomic.Value {
+	v := &atomic.Value{}
+	v.Store(propagatorsHolder{tm: newTextMapPropagator()})
+	return v
+}
+
+func defaultMeterProvider() *atomic.Value {
+	v := &atomic.Value{}
+	v.Store(meterProviderHolder{mp: &meterProvider{}})
+	return v
+}
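
One subtlety worth illustrating (a sketch): the guards above stop the default delegating providers from being configured to delegate to themselves, which would otherwise create a cycle. The rejection is reported through the global error handler rather than panicking:

```go
package main

import "go.opentelemetry.io/otel"

func main() {
	// Before an SDK is installed, GetTracerProvider returns the default
	// delegating provider. Passing it straight back to SetTracerProvider is
	// detected as self-delegation and reported via the global error handler;
	// no delegate is configured.
	tp := otel.GetTracerProvider()
	otel.SetTracerProvider(tp)
}
```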
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
new file mode 100644
index 0000000000000000000000000000000000000000..3f61ec12a34fa3e235633e5c9e83e09e03cd68ad
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
@@ -0,0 +1,199 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+/*
+This file contains the forwarding implementation of the TracerProvider used as
+the default global instance. Prior to initialization of an SDK, Tracers
+returned by the global TracerProvider will provide no-op functionality. This
+means that all Spans created prior to initialization are no-op Spans.
+
+Once an SDK has been initialized, all provided no-op Tracers are swapped for
+Tracers provided by the SDK defined TracerProvider. However, any Span started
+prior to this initialization does not change its behavior. Meaning, the Span
+remains a no-op Span.
+
+The implementation to track and swap Tracers locks all new Tracer creation
+until the swap is complete. This assumes that this operation is not
+performance-critical. If that assumption is incorrect, be sure to configure an
+SDK prior to any Tracer creation.
+*/
+
+import (
+	"context"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+// tracerProvider is a placeholder for a configured SDK TracerProvider.
+//
+// All TracerProvider functionality is forwarded to a delegate once
+// configured.
+type tracerProvider struct {
+	embedded.TracerProvider
+
+	mtx      sync.Mutex
+	tracers  map[il]*tracer
+	delegate trace.TracerProvider
+}
+
+// Compile-time guarantee that tracerProvider implements the TracerProvider
+// interface.
+var _ trace.TracerProvider = &tracerProvider{}
+
+// setDelegate configures p to delegate all TracerProvider functionality to
+// provider.
+//
+// All Tracers provided prior to this function call are switched out to be
+// Tracers provided by provider.
+//
+// It is guaranteed by the caller that this happens only once.
+func (p *tracerProvider) setDelegate(provider trace.TracerProvider) {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	p.delegate = provider
+
+	if len(p.tracers) == 0 {
+		return
+	}
+
+	for _, t := range p.tracers {
+		t.setDelegate(provider)
+	}
+
+	p.tracers = nil
+}
+
+// Tracer implements TracerProvider.
+func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	if p.delegate != nil {
+		return p.delegate.Tracer(name, opts...)
+	}
+
+	// At this moment it is guaranteed that no sdk is installed, save the tracer in the tracers map.
+
+	c := trace.NewTracerConfig(opts...)
+	key := il{
+		name:    name,
+		version: c.InstrumentationVersion(),
+	}
+
+	if p.tracers == nil {
+		p.tracers = make(map[il]*tracer)
+	}
+
+	if val, ok := p.tracers[key]; ok {
+		return val
+	}
+
+	t := &tracer{name: name, opts: opts, provider: p}
+	p.tracers[key] = t
+	return t
+}
+
+type il struct {
+	name    string
+	version string
+}
+
+// tracer is a placeholder for a trace.Tracer.
+//
+// All Tracer functionality is forwarded to a delegate once configured.
+// Otherwise, all functionality is forwarded to a NoopTracer.
+type tracer struct {
+	embedded.Tracer
+
+	name     string
+	opts     []trace.TracerOption
+	provider *tracerProvider
+
+	delegate atomic.Value
+}
+
+// Compile-time guarantee that tracer implements the trace.Tracer interface.
+var _ trace.Tracer = &tracer{}
+
+// setDelegate configures t to delegate all Tracer functionality to Tracers
+// created by provider.
+//
+// All subsequent calls to the Tracer methods will be passed to the delegate.
+//
+// It is guaranteed by the caller that this happens only once.
+func (t *tracer) setDelegate(provider trace.TracerProvider) {
+	t.delegate.Store(provider.Tracer(t.name, t.opts...))
+}
+
+// Start implements trace.Tracer by forwarding the call to t.delegate if
+// set, otherwise it forwards the call to a NoopTracer.
+func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
+	delegate := t.delegate.Load()
+	if delegate != nil {
+		return delegate.(trace.Tracer).Start(ctx, name, opts...)
+	}
+
+	s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t}
+	ctx = trace.ContextWithSpan(ctx, s)
+	return ctx, s
+}
+
+// nonRecordingSpan is a minimal implementation of a Span that wraps a
+// SpanContext. It performs no operations other than to return the wrapped
+// SpanContext.
+type nonRecordingSpan struct {
+	embedded.Span
+
+	sc     trace.SpanContext
+	tracer *tracer
+}
+
+var _ trace.Span = nonRecordingSpan{}
+
+// SpanContext returns the wrapped SpanContext.
+func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc }
+
+// IsRecording always returns false.
+func (nonRecordingSpan) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (nonRecordingSpan) SetStatus(codes.Code, string) {}
+
+// SetError does nothing.
+func (nonRecordingSpan) SetError(bool) {}
+
+// SetAttributes does nothing.
+func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (nonRecordingSpan) End(...trace.SpanEndOption) {}
+
+// RecordError does nothing.
+func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {}
+
+// AddEvent does nothing.
+func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {}
+
+// SetName does nothing.
+func (nonRecordingSpan) SetName(string) {}
+
+func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider }
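
A hedged sketch of the behavior described in the file comment above (it assumes the `go.opentelemetry.io/otel/sdk/trace` module is available): a span started before SDK initialization is the nonRecordingSpan and stays that way, while the same tracer records spans once a delegate is installed:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	tracer := otel.Tracer("example")

	// Started before the SDK is installed: this is the nonRecordingSpan
	// above, and it stays non-recording even after initialization.
	_, early := tracer.Start(context.Background(), "early")
	defer early.End()

	otel.SetTracerProvider(sdktrace.NewTracerProvider())

	// The same tracer now delegates to the SDK, so this span records.
	_, late := tracer.Start(context.Background(), "late")
	defer late.End()
}
```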
diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
new file mode 100644
index 0000000000000000000000000000000000000000..e07e7940004c289115f35dd21905ae4117784fc6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
@@ -0,0 +1,55 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/internal"
+
+import (
+	"math"
+	"unsafe"
+)
+
+func BoolToRaw(b bool) uint64 { // nolint:revive  // b is not a control flag.
+	if b {
+		return 1
+	}
+	return 0
+}
+
+func RawToBool(r uint64) bool {
+	return r != 0
+}
+
+func Int64ToRaw(i int64) uint64 {
+	return uint64(i)
+}
+
+func RawToInt64(r uint64) int64 {
+	return int64(r)
+}
+
+func Float64ToRaw(f float64) uint64 {
+	return math.Float64bits(f)
+}
+
+func RawToFloat64(r uint64) float64 {
+	return math.Float64frombits(r)
+}
+
+func RawPtrToFloat64Ptr(r *uint64) *float64 {
+	return (*float64)(unsafe.Pointer(r))
+}
+
+func RawPtrToInt64Ptr(r *uint64) *int64 {
+	return (*int64)(unsafe.Pointer(r))
+}
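
These helpers are thin wrappers over the standard library's bit-casting functions. A small stand-alone sketch of the same round trip using `math` directly (the internal package itself cannot be imported from outside this module):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// Float64ToRaw/RawToFloat64 reduce to Float64bits/Float64frombits: the
	// uint64 carries the exact IEEE 754 bit pattern, so the round trip is
	// lossless.
	f := 3.75
	raw := math.Float64bits(f)
	fmt.Println(math.Float64frombits(raw) == f) // true

	// Int64ToRaw/RawToInt64 are plain two's-complement reinterpretations.
	i := int64(-42)
	fmt.Println(int64(uint64(i)) == i) // true
}
```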
diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go
new file mode 100644
index 0000000000000000000000000000000000000000..c4f8acd5d837606fae75e49360a30684b475446f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal_logging.go
@@ -0,0 +1,26 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"github.com/go-logr/logr"
+
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+// SetLogger configures the logger used internally to opentelemetry.
+func SetLogger(logger logr.Logger) {
+	global.SetLogger(logger)
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go
new file mode 100644
index 0000000000000000000000000000000000000000..f955171951fdfe5f4523b1f9610013b4c7c1541e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric.go
@@ -0,0 +1,53 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/metric"
+)
+
+// Meter returns a Meter from the global MeterProvider. The name must be the
+// name of the library providing instrumentation. This name may be the same as
+// the instrumented code only if that code provides built-in instrumentation.
+// If the name is empty, then an implementation defined default name will be
+// used instead.
+//
+// If this is called before a global MeterProvider is registered the returned
+// Meter will be a No-op implementation of a Meter. When a global MeterProvider
+// is registered for the first time, the returned Meter, and all the
+// instruments it has created or will create, are recreated automatically from
+// the new MeterProvider.
+//
+// This is short for GetMeterProvider().Meter(name).
+func Meter(name string, opts ...metric.MeterOption) metric.Meter {
+	return GetMeterProvider().Meter(name, opts...)
+}
+
+// GetMeterProvider returns the registered global meter provider.
+//
+// If no global MeterProvider has been registered, a No-op MeterProvider
+// implementation is returned. When a global MeterProvider is registered for
+// the first time, the returned MeterProvider, and all the Meters it has
+// created or will create, are recreated automatically from the new
+// MeterProvider.
+func GetMeterProvider() metric.MeterProvider {
+	return global.MeterProvider()
+}
+
+// SetMeterProvider registers mp as the global MeterProvider.
+func SetMeterProvider(mp metric.MeterProvider) {
+	global.SetMeterProvider(mp)
+}
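
By convention the Meter name identifies the instrumenting library rather than the application, and together with the instrumentation version it forms the scope key (the `il{name, version}` struct) under which the global delegating provider caches placeholder meters. A short sketch (the import path is hypothetical):

```go
package main

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	// name + instrumentation version form the il{name, version} key under
	// which the global meterProvider caches this placeholder meter.
	m := otel.Meter(
		"github.com/example/mylib", // hypothetical instrumentation library path
		metric.WithInstrumentationVersion("v0.1.0"),
	)
	_ = m
}
```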
diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
new file mode 100644
index 0000000000000000000000000000000000000000..072baa8e8d0aa7476f80cea866099c081b293448
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
@@ -0,0 +1,271 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Float64Observable describes a set of instruments used asynchronously to
+// record float64 measurements once per collection cycle. Observations of
+// these instruments are only made within a callback.
+//
+// Warning: Methods may be added to this interface in minor releases.
+type Float64Observable interface {
+	Observable
+
+	float64Observable()
+}
+
+// Float64ObservableCounter is an instrument used to asynchronously record
+// increasing float64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument. The value observed is
+// assumed to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64ObservableCounter
+
+	Float64Observable
+}
+
+// Float64ObservableCounterConfig contains options for asynchronous counter
+// instruments that record int64 values.
+type Float64ObservableCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Float64Callback
+}
+
+// NewFloat64ObservableCounterConfig returns a new
+// [Float64ObservableCounterConfig] with all opts applied.
+func NewFloat64ObservableCounterConfig(opts ...Float64ObservableCounterOption) Float64ObservableCounterConfig {
+	var config Float64ObservableCounterConfig
+	for _, o := range opts {
+		config = o.applyFloat64ObservableCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableCounterConfig) Callbacks() []Float64Callback {
+	return c.callbacks
+}
+
+// Float64ObservableCounterOption applies options to a
+// [Float64ObservableCounterConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableCounterOption.
+type Float64ObservableCounterOption interface {
+	applyFloat64ObservableCounter(Float64ObservableCounterConfig) Float64ObservableCounterConfig
+}
+
+// Float64ObservableUpDownCounter is an instrument used to asynchronously
+// record float64 measurements once per collection cycle. Observations are only
+// made within a callback for this instrument. The value observed is assumed
+// to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableUpDownCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64ObservableUpDownCounter
+
+	Float64Observable
+}
+
+// Float64ObservableUpDownCounterConfig contains options for asynchronous
+// counter instruments that record float64 values.
+type Float64ObservableUpDownCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Float64Callback
+}
+
+// NewFloat64ObservableUpDownCounterConfig returns a new
+// [Float64ObservableUpDownCounterConfig] with all opts applied.
+func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig {
+	var config Float64ObservableUpDownCounterConfig
+	for _, o := range opts {
+		config = o.applyFloat64ObservableUpDownCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableUpDownCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableUpDownCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableUpDownCounterConfig) Callbacks() []Float64Callback {
+	return c.callbacks
+}
+
+// Float64ObservableUpDownCounterOption applies options to a
+// [Float64ObservableUpDownCounterConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableUpDownCounterOption.
+type Float64ObservableUpDownCounterOption interface {
+	applyFloat64ObservableUpDownCounter(Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig
+}
+
+// Float64ObservableGauge is an instrument used to asynchronously record
+// instantaneous float64 measurements once per collection cycle. Observations
+// are only made within a callback for this instrument.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableGauge interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64ObservableGauge
+
+	Float64Observable
+}
+
+// Float64ObservableGaugeConfig contains options for asynchronous gauge
+// instruments that record float64 values.
+type Float64ObservableGaugeConfig struct {
+	description string
+	unit        string
+	callbacks   []Float64Callback
+}
+
+// NewFloat64ObservableGaugeConfig returns a new [Float64ObservableGaugeConfig]
+// with all opts applied.
+func NewFloat64ObservableGaugeConfig(opts ...Float64ObservableGaugeOption) Float64ObservableGaugeConfig {
+	var config Float64ObservableGaugeConfig
+	for _, o := range opts {
+		config = o.applyFloat64ObservableGauge(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableGaugeConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableGaugeConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableGaugeConfig) Callbacks() []Float64Callback {
+	return c.callbacks
+}
+
+// Float64ObservableGaugeOption applies options to a
+// [Float64ObservableGaugeConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableGaugeOption.
+type Float64ObservableGaugeOption interface {
+	applyFloat64ObservableGauge(Float64ObservableGaugeConfig) Float64ObservableGaugeConfig
+}
+
+// Float64Observer is a recorder of float64 measurements.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Observer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64Observer
+
+	// Observe records the float64 value.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Observe(value float64, options ...ObserveOption)
+}
+
+// Float64Callback is a function registered with a Meter that makes
+// observations for a Float64Observable instrument it is registered with.
+// Calls to the Float64Observer record measurement values for the
+// Float64Observable.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Float64Callbacks. Meaning, it should not report measurements with the same
+// attributes as another Float64Callback also registered for the same
+// instrument.
+//
+// The function needs to be concurrent safe.
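+//
+// A minimal sketch of a conforming callback (readValue is a hypothetical
+// helper returning a float64, not part of this package):
+//
+//	var cb Float64Callback = func(ctx context.Context, o Float64Observer) error {
+//		if err := ctx.Err(); err != nil {
+//			return err // Honor the context deadline.
+//		}
+//		o.Observe(readValue())
+//		return nil
+//	}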
+type Float64Callback func(context.Context, Float64Observer) error
+
+// Float64ObservableOption applies options to float64 Observer instruments.
+type Float64ObservableOption interface {
+	Float64ObservableCounterOption
+	Float64ObservableUpDownCounterOption
+	Float64ObservableGaugeOption
+}
+
+type float64CallbackOpt struct {
+	cback Float64Callback
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableGauge(cfg Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+// WithFloat64Callback adds a callback to be called for an instrument.
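+//
+// A sketch of creation-time registration, assuming meter is a [Meter] and
+// cpuUsage is a hypothetical helper returning a float64:
+//
+//	gauge, err := meter.Float64ObservableGauge(
+//		"system.cpu.utilization",
+//		WithFloat64Callback(func(ctx context.Context, o Float64Observer) error {
+//			o.Observe(cpuUsage())
+//			return nil
+//		}),
+//	)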
+func WithFloat64Callback(callback Float64Callback) Float64ObservableOption {
+	return float64CallbackOpt{callback}
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
new file mode 100644
index 0000000000000000000000000000000000000000..9bd6ebf0205463404309beee8ef68fa59023a942
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
@@ -0,0 +1,269 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Int64Observable describes a set of instruments used asynchronously to record
+// int64 measurements once per collection cycle. Observations of these
+// instruments are only made within a callback.
+//
+// Warning: Methods may be added to this interface in minor releases.
+type Int64Observable interface {
+	Observable
+
+	int64Observable()
+}
+
+// Int64ObservableCounter is an instrument used to asynchronously record
+// increasing int64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument. The value observed is
+// assumed to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64ObservableCounter
+
+	Int64Observable
+}
+
+// Int64ObservableCounterConfig contains options for asynchronous counter
+// instruments that record int64 values.
+type Int64ObservableCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Int64Callback
+}
+
+// NewInt64ObservableCounterConfig returns a new [Int64ObservableCounterConfig]
+// with all opts applied.
+func NewInt64ObservableCounterConfig(opts ...Int64ObservableCounterOption) Int64ObservableCounterConfig {
+	var config Int64ObservableCounterConfig
+	for _, o := range opts {
+		config = o.applyInt64ObservableCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableCounterConfig) Callbacks() []Int64Callback {
+	return c.callbacks
+}
+
+// Int64ObservableCounterOption applies options to a
+// [Int64ObservableCounterConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableCounterOption.
+type Int64ObservableCounterOption interface {
+	applyInt64ObservableCounter(Int64ObservableCounterConfig) Int64ObservableCounterConfig
+}
+
+// Int64ObservableUpDownCounter is an instrument used to asynchronously record
+// int64 measurements once per collection cycle. Observations are only made
+// within a callback for this instrument. The value observed is assumed to be
+// the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableUpDownCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64ObservableUpDownCounter
+
+	Int64Observable
+}
+
+// Int64ObservableUpDownCounterConfig contains options for asynchronous
+// up-down counter instruments that record int64 values.
+type Int64ObservableUpDownCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Int64Callback
+}
+
+// NewInt64ObservableUpDownCounterConfig returns a new
+// [Int64ObservableUpDownCounterConfig] with all opts applied.
+func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig {
+	var config Int64ObservableUpDownCounterConfig
+	for _, o := range opts {
+		config = o.applyInt64ObservableUpDownCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableUpDownCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableUpDownCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableUpDownCounterConfig) Callbacks() []Int64Callback {
+	return c.callbacks
+}
+
+// Int64ObservableUpDownCounterOption applies options to a
+// [Int64ObservableUpDownCounterConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableUpDownCounterOption.
+type Int64ObservableUpDownCounterOption interface {
+	applyInt64ObservableUpDownCounter(Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig
+}
+
+// Int64ObservableGauge is an instrument used to asynchronously record
+// instantaneous int64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableGauge interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64ObservableGauge
+
+	Int64Observable
+}
+
+// Int64ObservableGaugeConfig contains options for asynchronous gauge
+// instruments that record int64 values.
+type Int64ObservableGaugeConfig struct {
+	description string
+	unit        string
+	callbacks   []Int64Callback
+}
+
+// NewInt64ObservableGaugeConfig returns a new [Int64ObservableGaugeConfig]
+// with all opts applied.
+func NewInt64ObservableGaugeConfig(opts ...Int64ObservableGaugeOption) Int64ObservableGaugeConfig {
+	var config Int64ObservableGaugeConfig
+	for _, o := range opts {
+		config = o.applyInt64ObservableGauge(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableGaugeConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableGaugeConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableGaugeConfig) Callbacks() []Int64Callback {
+	return c.callbacks
+}
+
+// Int64ObservableGaugeOption applies options to a
+// [Int64ObservableGaugeConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableGaugeOption.
+type Int64ObservableGaugeOption interface {
+	applyInt64ObservableGauge(Int64ObservableGaugeConfig) Int64ObservableGaugeConfig
+}
+
+// Int64Observer is a recorder of int64 measurements.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Observer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64Observer
+
+	// Observe records the int64 value.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Observe(value int64, options ...ObserveOption)
+}
+
+// Int64Callback is a function registered with a Meter that makes observations
+// for an Int64Observable instrument it is registered with. Calls to the
+// Int64Observer record measurement values for the Int64Observable.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Int64Callbacks. Meaning, it should not report measurements with the same
+// attributes as another Int64Callback also registered for the same
+// instrument.
+//
+// The function needs to be concurrent safe.
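+//
+// A minimal sketch, assuming queueDepth is a hypothetical helper returning an
+// int64 and attribute is the go.opentelemetry.io/otel/attribute package:
+//
+//	var cb Int64Callback = func(ctx context.Context, o Int64Observer) error {
+//		o.Observe(queueDepth(), WithAttributes(attribute.String("queue", "default")))
+//		return nil
+//	}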
+type Int64Callback func(context.Context, Int64Observer) error
+
+// Int64ObservableOption applies options to int64 Observer instruments.
+type Int64ObservableOption interface {
+	Int64ObservableCounterOption
+	Int64ObservableUpDownCounterOption
+	Int64ObservableGaugeOption
+}
+
+type int64CallbackOpt struct {
+	cback Int64Callback
+}
+
+func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounterConfig) Int64ObservableCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o int64CallbackOpt) applyInt64ObservableGauge(cfg Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+// WithInt64Callback adds a callback to be called for an instrument.
+func WithInt64Callback(callback Int64Callback) Int64ObservableOption {
+	return int64CallbackOpt{callback}
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go
new file mode 100644
index 0000000000000000000000000000000000000000..778ad2d748b56d887441de6174a1cc8b4acb38c1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/config.go
@@ -0,0 +1,92 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// MeterConfig contains options for Meters.
+type MeterConfig struct {
+	instrumentationVersion string
+	schemaURL              string
+	attrs                  attribute.Set
+
+	// Ensure forward compatibility by explicitly making this not comparable.
+	noCmp [0]func() //nolint: unused  // This is indeed used.
+}
+
+// InstrumentationVersion returns the version of the library providing
+// instrumentation.
+func (cfg MeterConfig) InstrumentationVersion() string {
+	return cfg.instrumentationVersion
+}
+
+// InstrumentationAttributes returns the attributes associated with the library
+// providing instrumentation.
+func (cfg MeterConfig) InstrumentationAttributes() attribute.Set {
+	return cfg.attrs
+}
+
+// SchemaURL is the schema_url of the library providing instrumentation.
+func (cfg MeterConfig) SchemaURL() string {
+	return cfg.schemaURL
+}
+
+// MeterOption is an interface for applying Meter options.
+type MeterOption interface {
+	// applyMeter is used to set a MeterOption value of a MeterConfig.
+	applyMeter(MeterConfig) MeterConfig
+}
+
+// NewMeterConfig creates a new MeterConfig and applies
+// all the given options.
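+//
+// For example (the version and schema URL values are illustrative):
+//
+//	cfg := NewMeterConfig(
+//		WithInstrumentationVersion("v1.2.3"),
+//		WithSchemaURL("https://opentelemetry.io/schemas/1.21.0"),
+//	)
+//	_ = cfg.InstrumentationVersion() // "v1.2.3"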
+func NewMeterConfig(opts ...MeterOption) MeterConfig {
+	var config MeterConfig
+	for _, o := range opts {
+		config = o.applyMeter(config)
+	}
+	return config
+}
+
+type meterOptionFunc func(MeterConfig) MeterConfig
+
+func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig {
+	return fn(cfg)
+}
+
+// WithInstrumentationVersion sets the instrumentation version.
+func WithInstrumentationVersion(version string) MeterOption {
+	return meterOptionFunc(func(config MeterConfig) MeterConfig {
+		config.instrumentationVersion = version
+		return config
+	})
+}
+
+// WithInstrumentationAttributes sets the instrumentation attributes.
+//
+// The passed attributes will be de-duplicated.
+func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption {
+	return meterOptionFunc(func(config MeterConfig) MeterConfig {
+		config.attrs = attribute.NewSet(attr...)
+		return config
+	})
+}
+
+// WithSchemaURL sets the schema URL.
+func WithSchemaURL(schemaURL string) MeterOption {
+	return meterOptionFunc(func(config MeterConfig) MeterConfig {
+		config.schemaURL = schemaURL
+		return config
+	})
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..54716e13b3551842b597e8e2889ebc294797a5de
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/doc.go
@@ -0,0 +1,170 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package metric provides the OpenTelemetry API used to measure metrics about
+source code operation.
+
+This API is separate from its implementation so the instrumentation built from
+it is reusable. See [go.opentelemetry.io/otel/sdk/metric] for the official
+OpenTelemetry implementation of this API.
+
+All measurements made with this package are made via instruments. These
+instruments are created by a [Meter] which itself is created by a
+[MeterProvider]. Applications need to accept a [MeterProvider] implementation
+as a starting point when instrumenting. This can be done directly, or by using
+the OpenTelemetry global MeterProvider via [GetMeterProvider]. Using an
+appropriately named [Meter] from the accepted [MeterProvider], instrumentation
+can then be built from the [Meter]'s instruments.
+
+# Instruments
+
+Each instrument is designed to make measurements of a particular type. Broadly,
+all instruments fall along two logical dimensions: asynchronous or
+synchronous, and int64 or float64.
+
+All synchronous instruments ([Int64Counter], [Int64UpDownCounter],
+[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and
+[Float64Histogram]) are used to measure the operation and performance of source
+code during the source code execution. These instruments only make measurements
+when the source code they instrument is run.
+
+All asynchronous instruments ([Int64ObservableCounter],
+[Int64ObservableUpDownCounter], [Int64ObservableGauge],
+[Float64ObservableCounter], [Float64ObservableUpDownCounter], and
+[Float64ObservableGauge]) are used to measure metrics outside of the execution
+of source code. They are said to make "observations" via a callback function
+called once every measurement collection cycle.
+
+Each instrument is also grouped by the value type it measures, either int64 or
+float64. The value being measured dictates which instrument in these
+categories to use.
+
+Outside of these two broad categories, instruments are described by the
+function they are designed to serve. All Counters ([Int64Counter],
+[Float64Counter], [Int64ObservableCounter], and [Float64ObservableCounter]) are
+designed to measure values that never decrease, only monotonically
+increase. UpDownCounters ([Int64UpDownCounter],
+[Float64UpDownCounter], [Int64ObservableUpDownCounter], and
+[Float64ObservableUpDownCounter]), on the other hand, are designed to measure
+values that can increase and decrease. When more information needs to be
+conveyed about all the synchronous measurements made during a collection cycle,
+a Histogram ([Int64Histogram] and [Float64Histogram]) should be used. Finally,
+when just the most recent measurement needs to be conveyed about an
+asynchronous measurement, a Gauge ([Int64ObservableGauge] and
+[Float64ObservableGauge]) should be used.
+
+See the [OpenTelemetry documentation] for more information about instruments
+and their intended use.
+
+# Measurements
+
+Measurements are made by recording values and information about the values with
+an instrument. How these measurements are recorded depends on the instrument.
+
+Measurements for synchronous instruments ([Int64Counter], [Int64UpDownCounter],
+[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and
+[Float64Histogram]) are recorded using the instrument methods directly. All
+counter instruments have an Add method that is used to measure an increment
+value, and all histogram instruments have a Record method to measure a data
+point.
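+
+For example, a synchronous counter measurement might look like the following
+sketch, where requests is an [Int64Counter] previously created from a Meter
+and attribute is the go.opentelemetry.io/otel/attribute package:
+
+	requests.Add(ctx, 1, metric.WithAttributes(attribute.String("path", "/api")))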
+
+Asynchronous instruments ([Int64ObservableCounter],
+[Int64ObservableUpDownCounter], [Int64ObservableGauge],
+[Float64ObservableCounter], [Float64ObservableUpDownCounter], and
+[Float64ObservableGauge]) record measurements within a callback function. The
+callback is registered with the Meter which ensures the callback is called once
+per collection cycle. A callback can be registered two ways: during the
+instrument's creation using an option, or later using the RegisterCallback
+method of the [Meter] that created the instrument.
+
+If the following criteria are met, an option ([WithInt64Callback] or
+[WithFloat64Callback]) can be used during the asynchronous instrument's
+creation to register a callback ([Int64Callback] or [Float64Callback],
+respectively):
+
+  - The measurement process is known when the instrument is created
+  - Only that instrument will make a measurement within the callback
+  - The callback never needs to be unregistered
+
+If the criteria are not met, use the RegisterCallback method of the [Meter] that
+created the instrument to register a [Callback].
+
+# API Implementations
+
+This package does not conform to the standard Go versioning policy; all of its
+interfaces may have methods added to them without a package major version bump.
+This non-standard API evolution could surprise an uninformed implementation
+author. They could unknowingly build their implementation in a way that would
+result in a runtime panic for their users who update to the new API.
+
+The API is designed to help inform an instrumentation author about this
+non-standard API evolution. It requires them to choose a default behavior for
+unimplemented interface methods. There are three behavior choices they can
+make:
+
+  - Compilation failure
+  - Panic
+  - Default to another implementation
+
+All interfaces in this API embed a corresponding interface from
+[go.opentelemetry.io/otel/metric/embedded]. If an author wants the default
+behavior of their implementations to be a compilation failure, signaling to
+their users they need to update to the latest version of that implementation,
+they need to embed the corresponding interface from
+[go.opentelemetry.io/otel/metric/embedded] in their implementation. For
+example,
+
+	import "go.opentelemetry.io/otel/metric/embedded"
+
+	type MeterProvider struct {
+		embedded.MeterProvider
+		// ...
+	}
+
+If an author wants the default behavior of their implementations to be a panic,
+they need to embed the API interface directly.
+
+	import "go.opentelemetry.io/otel/metric"
+
+	type MeterProvider struct {
+		metric.MeterProvider
+		// ...
+	}
+
+This is not a recommended behavior as it could lead to publishing packages that
+contain runtime panics when users update other packages that use newer versions
+of [go.opentelemetry.io/otel/metric].
+
+Finally, an author can embed another implementation in theirs. The embedded
+implementation will be used for methods not defined by the author. For example,
+an author who wants to default to silently dropping the call can use
+[go.opentelemetry.io/otel/metric/noop]:
+
+	import "go.opentelemetry.io/otel/metric/noop"
+
+	type MeterProvider struct {
+		noop.MeterProvider
+		// ...
+	}
+
+It is strongly recommended that authors only embed
+[go.opentelemetry.io/otel/metric/noop] if they choose this default behavior.
+That implementation is the only one OpenTelemetry authors can guarantee will
+fully implement all the API interfaces when a user updates their API.
+
+[OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/
+[GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider
+*/
+package metric // import "go.opentelemetry.io/otel/metric"
diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
new file mode 100644
index 0000000000000000000000000000000000000000..ae0bdbd2e64572354475efcddae11f5d44e8f788
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
@@ -0,0 +1,234 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package embedded provides interfaces embedded within the [OpenTelemetry
+// metric API].
+//
+// Implementers of the [OpenTelemetry metric API] can embed the relevant type
+// from this package into their implementation directly. Doing so will result
+// in a compilation error for users when the [OpenTelemetry metric API] is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+//
+// [OpenTelemetry metric API]: https://pkg.go.dev/go.opentelemetry.io/otel/metric
+package embedded // import "go.opentelemetry.io/otel/metric/embedded"
+
+// MeterProvider is embedded in
+// [go.opentelemetry.io/otel/metric.MeterProvider].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.MeterProvider] if you want users to
+// experience a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.MeterProvider]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type MeterProvider interface{ meterProvider() }
+
+// Meter is embedded in [go.opentelemetry.io/otel/metric.Meter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Meter] if you want users to experience a
+// compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.Meter] interface
+// is extended (which is something that can happen without a major version bump
+// of the API package).
+type Meter interface{ meter() }
+
+// Float64Observer is embedded in
+// [go.opentelemetry.io/otel/metric.Float64Observer].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64Observer] if you want
+// users to experience a compilation error, signaling they need to update to
+// your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Float64Observer] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Float64Observer interface{ float64Observer() }
+
+// Int64Observer is embedded in
+// [go.opentelemetry.io/otel/metric.Int64Observer].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64Observer] if you want users
+// to experience a compilation error, signaling they need to update to your
+// latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Int64Observer] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Int64Observer interface{ int64Observer() }
+
+// Observer is embedded in [go.opentelemetry.io/otel/metric.Observer].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Observer] if you want users to experience a
+// compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.Observer]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Observer interface{ observer() }
+
+// Registration is embedded in [go.opentelemetry.io/otel/metric.Registration].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Registration] if you want users to
+// experience a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.Registration]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Registration interface{ registration() }
+
+// Float64Counter is embedded in
+// [go.opentelemetry.io/otel/metric.Float64Counter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64Counter] if you want
+// users to experience a compilation error, signaling they need to update to
+// your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Float64Counter] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Float64Counter interface{ float64Counter() }
+
+// Float64Histogram is embedded in
+// [go.opentelemetry.io/otel/metric.Float64Histogram].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64Histogram] if you want
+// users to experience a compilation error, signaling they need to update to
+// your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Float64Histogram] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Float64Histogram interface{ float64Histogram() }
+
+// Float64ObservableCounter is embedded in
+// [go.opentelemetry.io/otel/metric.Float64ObservableCounter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] if you
+// want users to experience a compilation error, signaling they need to update
+// to your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Float64ObservableCounter]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Float64ObservableCounter interface{ float64ObservableCounter() }
+
+// Float64ObservableGauge is embedded in
+// [go.opentelemetry.io/otel/metric.Float64ObservableGauge].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] if you
+// want users to experience a compilation error, signaling they need to update
+// to your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Float64ObservableGauge]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Float64ObservableGauge interface{ float64ObservableGauge() }
+
+// Float64ObservableUpDownCounter is embedded in
+// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]
+// if you want users to experience a compilation error, signaling they need to
+// update to your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Float64ObservableUpDownCounter interface{ float64ObservableUpDownCounter() }
+
+// Float64UpDownCounter is embedded in
+// [go.opentelemetry.io/otel/metric.Float64UpDownCounter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] if you
+// want users to experience a compilation error, signaling they need to update
+// to your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] interface
+// is extended (which is something that can happen without a major version bump
+// of the API package).
+type Float64UpDownCounter interface{ float64UpDownCounter() }
+
+// Int64Counter is embedded in
+// [go.opentelemetry.io/otel/metric.Int64Counter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64Counter] if you want users
+// to experience a compilation error, signaling they need to update to your
+// latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Int64Counter] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Int64Counter interface{ int64Counter() }
+
+// Int64Histogram is embedded in
+// [go.opentelemetry.io/otel/metric.Int64Histogram].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64Histogram] if you want
+// users to experience a compilation error, signaling they need to update to
+// your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Int64Histogram] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Int64Histogram interface{ int64Histogram() }
+
+// Int64ObservableCounter is embedded in
+// [go.opentelemetry.io/otel/metric.Int64ObservableCounter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] if you
+// want users to experience a compilation error, signaling they need to update
+// to your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Int64ObservableCounter]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Int64ObservableCounter interface{ int64ObservableCounter() }
+
+// Int64ObservableGauge is embedded in
+// [go.opentelemetry.io/otel/metric.Int64ObservableGauge].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] if you
+// want users to experience a compilation error, signaling they need to update
+// to your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] interface
+// is extended (which is something that can happen without a major version bump
+// of the API package).
+type Int64ObservableGauge interface{ int64ObservableGauge() }
+
+// Int64ObservableUpDownCounter is embedded in
+// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] if
+// you want users to experience a compilation error, signaling they need to
+// update to your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Int64ObservableUpDownCounter interface{ int64ObservableUpDownCounter() }
+
+// Int64UpDownCounter is embedded in
+// [go.opentelemetry.io/otel/metric.Int64UpDownCounter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] if you want
+// users to experience a compilation error, signaling they need to update to
+// your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Int64UpDownCounter interface{ int64UpDownCounter() }
diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go
new file mode 100644
index 0000000000000000000000000000000000000000..be89cd533417083be1092c8bdae7c3073168e16d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go
@@ -0,0 +1,357 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Observable is used as a grouping mechanism for all instruments that are
+// updated within a Callback.
+type Observable interface {
+	observable()
+}
+
+// InstrumentOption applies options to all instruments.
+type InstrumentOption interface {
+	Int64CounterOption
+	Int64UpDownCounterOption
+	Int64HistogramOption
+	Int64ObservableCounterOption
+	Int64ObservableUpDownCounterOption
+	Int64ObservableGaugeOption
+
+	Float64CounterOption
+	Float64UpDownCounterOption
+	Float64HistogramOption
+	Float64ObservableCounterOption
+	Float64ObservableUpDownCounterOption
+	Float64ObservableGaugeOption
+}
+
+// HistogramOption applies options to histogram instruments.
+type HistogramOption interface {
+	Int64HistogramOption
+	Float64HistogramOption
+}
+
+type descOpt string
+
+func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig {
+	c.description = string(o)
+	return c
+}
+
+func (o descOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig {
+	c.description = string(o)
+	return c
+}
+
+func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
+	c.description = string(o)
+	return c
+}
+
+func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
+	c.description = string(o)
+	return c
+}
+
+func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
+	c.description = string(o)
+	return c
+}
+
+func (o descOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
+	c.description = string(o)
+	return c
+}
+
+func (o descOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig {
+	c.description = string(o)
+	return c
+}
+
+func (o descOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig {
+	c.description = string(o)
+	return c
+}
+
+func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
+	c.description = string(o)
+	return c
+}
+
+func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
+	c.description = string(o)
+	return c
+}
+
+func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
+	c.description = string(o)
+	return c
+}
+
+func (o descOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
+	c.description = string(o)
+	return c
+}
+
+// WithDescription sets the instrument description.
+func WithDescription(desc string) InstrumentOption { return descOpt(desc) }
+
+type unitOpt string
+
+func (o unitOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
+	c.unit = string(o)
+	return c
+}
+
+// WithUnit sets the instrument unit.
+//
+// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code.
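+//
+// For example, milliseconds use the UCUM code "ms" (the instrument name below
+// is illustrative):
+//
+//	meter.Int64Histogram("request.duration", WithUnit("ms"))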
+func WithUnit(u string) InstrumentOption { return unitOpt(u) }
+
+// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries.
+//
+// This option is considered "advisory", and may be ignored by API implementations.
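+//
+// For example, request-latency buckets in seconds (the boundary values below
+// are illustrative):
+//
+//	hist, err := meter.Float64Histogram(
+//		"http.server.duration",
+//		WithUnit("s"),
+//		WithExplicitBucketBoundaries(0.01, 0.1, 1, 10),
+//	)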
+func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) }
+
+type bucketOpt []float64
+
+func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
+	c.explicitBucketBoundaries = o
+	return c
+}
+
+func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
+	c.explicitBucketBoundaries = o
+	return c
+}
+
+// AddOption applies options to an addition measurement. See
+// [MeasurementOption] for other options that can be used as an AddOption.
+type AddOption interface {
+	applyAdd(AddConfig) AddConfig
+}
+
+// AddConfig contains options for an addition measurement.
+type AddConfig struct {
+	attrs attribute.Set
+}
+
+// NewAddConfig returns a new [AddConfig] with all opts applied.
+func NewAddConfig(opts []AddOption) AddConfig {
+	config := AddConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyAdd(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c AddConfig) Attributes() attribute.Set {
+	return c.attrs
+}
+
+// RecordOption applies options to a recorded measurement. See
+// [MeasurementOption] for other options that can be used as a RecordOption.
+type RecordOption interface {
+	applyRecord(RecordConfig) RecordConfig
+}
+
+// RecordConfig contains options for a recorded measurement.
+type RecordConfig struct {
+	attrs attribute.Set
+}
+
+// NewRecordConfig returns a new [RecordConfig] with all opts applied.
+func NewRecordConfig(opts []RecordOption) RecordConfig {
+	config := RecordConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyRecord(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c RecordConfig) Attributes() attribute.Set {
+	return c.attrs
+}
+
+// ObserveOption applies options to an observed measurement. See
+// [MeasurementOption] for other options that can be used as an ObserveOption.
+type ObserveOption interface {
+	applyObserve(ObserveConfig) ObserveConfig
+}
+
+// ObserveConfig contains options for an observed measurement.
+type ObserveConfig struct {
+	attrs attribute.Set
+}
+
+// NewObserveConfig returns a new [ObserveConfig] with all opts applied.
+func NewObserveConfig(opts []ObserveOption) ObserveConfig {
+	config := ObserveConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyObserve(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c ObserveConfig) Attributes() attribute.Set {
+	return c.attrs
+}
+
+// MeasurementOption applies options to all instrument measurements.
+type MeasurementOption interface {
+	AddOption
+	RecordOption
+	ObserveOption
+}
+
+type attrOpt struct {
+	set attribute.Set
+}
+
+// mergeSets returns the union of keys between a and b. Any duplicate keys will
+// use the value associated with b.
+func mergeSets(a, b attribute.Set) attribute.Set {
+	// NewMergeIterator uses the first value for any duplicates.
+	iter := attribute.NewMergeIterator(&b, &a)
+	merged := make([]attribute.KeyValue, 0, a.Len()+b.Len())
+	for iter.Next() {
+		merged = append(merged, iter.Attribute())
+	}
+	return attribute.NewSet(merged...)
+}
+
+func (o attrOpt) applyAdd(c AddConfig) AddConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+func (o attrOpt) applyRecord(c RecordConfig) RecordConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+func (o attrOpt) applyObserve(c ObserveConfig) ObserveConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+// WithAttributeSet sets the attribute Set a measurement is made with.
+//
+// If multiple WithAttributeSet or WithAttributes options are passed the
+// attributes will be merged together in the order they are passed. Attributes
+// with duplicate keys will use the last value passed.
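+//
+// For example, with a hypothetical counter ctr, the last value passed for the
+// duplicate key "k" wins:
+//
+//	ctr.Add(ctx, 1,
+//		WithAttributeSet(attribute.NewSet(attribute.String("k", "a"))),
+//		WithAttributeSet(attribute.NewSet(attribute.String("k", "b"))), // "k" resolves to "b".
+//	)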
+func WithAttributeSet(attributes attribute.Set) MeasurementOption {
+	return attrOpt{set: attributes}
+}
+
+// WithAttributes converts attributes into an attribute Set and sets the Set to
+// be associated with a measurement. This is shorthand for:
+//
+//	cp := make([]attribute.KeyValue, len(attributes))
+//	copy(cp, attributes)
+//	WithAttributeSet(attribute.NewSet(cp...))
+//
+// [attribute.NewSet] may modify the passed attributes so this will make a copy
+// of attributes before creating a set in order to ensure this function is
+// concurrent safe. This makes this option function less optimized in
+// comparison to [WithAttributeSet]. Therefore, [WithAttributeSet] should be
+// preferred for performance sensitive code.
+//
+// See [WithAttributeSet] for information about how multiple WithAttributes are
+// merged.
+func WithAttributes(attributes ...attribute.KeyValue) MeasurementOption {
+	cp := make([]attribute.KeyValue, len(attributes))
+	copy(cp, attributes)
+	return attrOpt{set: attribute.NewSet(cp...)}
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go
new file mode 100644
index 0000000000000000000000000000000000000000..2520bc74af171959cb4ba97b5315f27597237c83
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/meter.go
@@ -0,0 +1,212 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// MeterProvider provides access to named Meter instances, for instrumenting
+// an application or package.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type MeterProvider interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.MeterProvider
+
+	// Meter returns a new Meter with the provided name and configuration.
+	//
+	// A Meter should be scoped at most to a single package. The name needs to
+	// be unique so it does not collide with other names used by
+	// an application or by other applications. To achieve this, the import
+	// path of the instrumentation package is recommended to be used as the
+	// name.
+	//
+	// If the name is empty, then an implementation defined default name will
+	// be used instead.
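+	//
+	// For example (the import path below is illustrative):
+	//
+	//	meter := provider.Meter("github.com/example/app/internal/telemetry")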
+	Meter(name string, opts ...MeterOption) Meter
+}
+
+// Meter provides access to instrument instances for recording metrics.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Meter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Meter
+
+	// Int64Counter returns a new Int64Counter instrument identified by name
+	// and configured with options. The instrument is used to synchronously
+	// record increasing int64 measurements during a computational operation.
+	Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error)
+	// Int64UpDownCounter returns a new Int64UpDownCounter instrument
+	// identified by name and configured with options. The instrument is used
+	// to synchronously record int64 measurements during a computational
+	// operation.
+	Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error)
+	// Int64Histogram returns a new Int64Histogram instrument identified by
+	// name and configured with options. The instrument is used to
+	// synchronously record the distribution of int64 measurements during a
+	// computational operation.
+	Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
+	// Int64ObservableCounter returns a new Int64ObservableCounter identified
+	// by name and configured with options. The instrument is used to
+	// asynchronously record increasing int64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithInt64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error)
+	// Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter
+	// instrument identified by name and configured with options. The
+	// instrument is used to asynchronously record int64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithInt64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error)
+	// Int64ObservableGauge returns a new Int64ObservableGauge instrument
+	// identified by name and configured with options. The instrument is used
+	// to asynchronously record instantaneous int64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithInt64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error)
+
+	// Float64Counter returns a new Float64Counter instrument identified by
+	// name and configured with options. The instrument is used to
+	// synchronously record increasing float64 measurements during a
+	// computational operation.
+	Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error)
+	// Float64UpDownCounter returns a new Float64UpDownCounter instrument
+	// identified by name and configured with options. The instrument is used
+	// to synchronously record float64 measurements during a computational
+	// operation.
+	Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
+	// Float64Histogram returns a new Float64Histogram instrument identified by
+	// name and configured with options. The instrument is used to
+	// synchronously record the distribution of float64 measurements during a
+	// computational operation.
+	Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
+	// Float64ObservableCounter returns a new Float64ObservableCounter
+	// instrument identified by name and configured with options. The
+	// instrument is used to asynchronously record increasing float64
+	// measurements once per measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithFloat64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
+	// Float64ObservableUpDownCounter returns a new
+	// Float64ObservableUpDownCounter instrument identified by name and
+	// configured with options. The instrument is used to asynchronously record
+	// float64 measurements once per measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithFloat64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error)
+	// Float64ObservableGauge returns a new Float64ObservableGauge instrument
+	// identified by name and configured with options. The instrument is used
+	// to asynchronously record instantaneous float64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithFloat64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error)
+
+	// RegisterCallback registers f to be called during the collection of a
+	// measurement cycle.
+	//
+	// If Unregister of the returned Registration is called, f needs to be
+	// unregistered and not called during collection.
+	//
+	// The instruments f is registered with are the only instruments that f may
+	// observe values for.
+	//
+	// If no instruments are passed, f should not be registered nor called
+	// during collection.
+	//
+	// The function f needs to be concurrent safe.
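+	//
+	// A sketch of registering a multi-instrument callback, assuming g1 and g2
+	// are observable instruments created by this Meter and readInt/readFloat
+	// are hypothetical helpers:
+	//
+	//	reg, err := meter.RegisterCallback(
+	//		func(ctx context.Context, o Observer) error {
+	//			o.ObserveInt64(g1, readInt())
+	//			o.ObserveFloat64(g2, readFloat())
+	//			return nil
+	//		},
+	//		g1, g2,
+	//	)
+	//	defer reg.Unregister()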
+	RegisterCallback(f Callback, instruments ...Observable) (Registration, error)
+}
+
+// Callback is a function registered with a Meter that makes observations for
+// the set of instruments it is registered with. The Observer parameter is used
+// to record measurement observations for these instruments.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Callbacks. That is, it should not report measurements for an instrument with
+// the same attributes that another Callback reports.
+//
+// The function needs to be concurrent safe.
+type Callback func(context.Context, Observer) error
+
+// Observer records measurements for multiple instruments in a Callback.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Observer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Observer
+
+	// ObserveFloat64 records the float64 value for obsrv.
+	ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption)
+	// ObserveInt64 records the int64 value for obsrv.
+	ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption)
+}
+
+// Registration is a token representing the unique registration of a callback
+// for a set of instruments with a Meter.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Registration interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Registration
+
+	// Unregister removes the callback registration from a Meter.
+	//
+	// This method needs to be idempotent and concurrent safe.
+	Unregister() error
+}
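A minimal sketch of how this Meter API is consumed, using the no-op provider (vendored below) as a stand-in implementation; the instrument name is illustrative:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/noop"
)

func main() {
	// Any MeterProvider works here; the vendored no-op provider keeps the
	// sketch self-contained.
	var meter metric.Meter = noop.NewMeterProvider().Meter("example")

	// Observable instruments report values through a registered callback.
	gauge, err := meter.Float64ObservableGauge("queue.depth")
	if err != nil {
		panic(err)
	}

	// The callback may only observe instruments it is registered with and
	// must be safe for concurrent use.
	reg, err := meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		o.ObserveFloat64(gauge, 42)
		return nil
	}, gauge)
	if err != nil {
		panic(err)
	}
	defer func() { _ = reg.Unregister() }()
}
```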
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
new file mode 100644
index 0000000000000000000000000000000000000000..acc9a670b220955a378aa5c8a6980a13cddd1e5e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
@@ -0,0 +1,264 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package noop provides an implementation of the OpenTelemetry metric API that
+// produces no telemetry and minimizes the computation resources it uses.
+//
+// Using this package to implement the OpenTelemetry metric API will
+// effectively disable OpenTelemetry.
+//
+// This implementation can be embedded in other implementations of the
+// OpenTelemetry metric API. Doing so will mean the implementation defaults to
+// no operation for methods it does not implement.
+package noop // import "go.opentelemetry.io/otel/metric/noop"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+var (
+	// Compile-time check this implements the OpenTelemetry API.
+
+	_ metric.MeterProvider                  = MeterProvider{}
+	_ metric.Meter                          = Meter{}
+	_ metric.Observer                       = Observer{}
+	_ metric.Registration                   = Registration{}
+	_ metric.Int64Counter                   = Int64Counter{}
+	_ metric.Float64Counter                 = Float64Counter{}
+	_ metric.Int64UpDownCounter             = Int64UpDownCounter{}
+	_ metric.Float64UpDownCounter           = Float64UpDownCounter{}
+	_ metric.Int64Histogram                 = Int64Histogram{}
+	_ metric.Float64Histogram               = Float64Histogram{}
+	_ metric.Int64ObservableCounter         = Int64ObservableCounter{}
+	_ metric.Float64ObservableCounter       = Float64ObservableCounter{}
+	_ metric.Int64ObservableGauge           = Int64ObservableGauge{}
+	_ metric.Float64ObservableGauge         = Float64ObservableGauge{}
+	_ metric.Int64ObservableUpDownCounter   = Int64ObservableUpDownCounter{}
+	_ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{}
+	_ metric.Int64Observer                  = Int64Observer{}
+	_ metric.Float64Observer                = Float64Observer{}
+)
+
+// MeterProvider is an OpenTelemetry No-Op MeterProvider.
+type MeterProvider struct{ embedded.MeterProvider }
+
+// NewMeterProvider returns a MeterProvider that does not record any telemetry.
+func NewMeterProvider() MeterProvider {
+	return MeterProvider{}
+}
+
+// Meter returns an OpenTelemetry Meter that does not record any telemetry.
+func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter {
+	return Meter{}
+}
+
+// Meter is an OpenTelemetry No-Op Meter.
+type Meter struct{ embedded.Meter }
+
+// Int64Counter returns a Counter used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) {
+	return Int64Counter{}, nil
+}
+
+// Int64UpDownCounter returns an UpDownCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
+	return Int64UpDownCounter{}, nil
+}
+
+// Int64Histogram returns a Histogram used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
+	return Int64Histogram{}, nil
+}
+
+// Int64ObservableCounter returns an ObservableCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
+	return Int64ObservableCounter{}, nil
+}
+
+// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record int64 measurements that produces no telemetry.
+func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
+	return Int64ObservableUpDownCounter{}, nil
+}
+
+// Int64ObservableGauge returns an ObservableGauge used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
+	return Int64ObservableGauge{}, nil
+}
+
+// Float64Counter returns a Counter used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) {
+	return Float64Counter{}, nil
+}
+
+// Float64UpDownCounter returns an UpDownCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
+	return Float64UpDownCounter{}, nil
+}
+
+// Float64Histogram returns a Histogram used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
+	return Float64Histogram{}, nil
+}
+
+// Float64ObservableCounter returns an ObservableCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
+	return Float64ObservableCounter{}, nil
+}
+
+// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record float64 measurements that produces no telemetry.
+func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
+	return Float64ObservableUpDownCounter{}, nil
+}
+
+// Float64ObservableGauge returns an ObservableGauge used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
+	return Float64ObservableGauge{}, nil
+}
+
+// RegisterCallback performs no operation.
+func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) {
+	return Registration{}, nil
+}
+
+// Observer acts as a recorder of measurements for multiple instruments in a
+// Callback; it performs no operation.
+type Observer struct{ embedded.Observer }
+
+// ObserveFloat64 performs no operation.
+func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) {
+}
+
+// ObserveInt64 performs no operation.
+func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) {
+}
+
+// Registration is the registration of a Callback with a No-Op Meter.
+type Registration struct{ embedded.Registration }
+
+// Unregister unregisters the Callback the Registration represents with the
+// No-Op Meter. It always returns nil because the No-Op Meter performs no
+// operation and holds no record of registrations.
+func (Registration) Unregister() error { return nil }
+
+// Int64Counter is an OpenTelemetry Counter used to record int64 measurements.
+// It produces no telemetry.
+type Int64Counter struct{ embedded.Int64Counter }
+
+// Add performs no operation.
+func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64Counter is an OpenTelemetry Counter used to record float64
+// measurements. It produces no telemetry.
+type Float64Counter struct{ embedded.Float64Counter }
+
+// Add performs no operation.
+func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64
+// measurements. It produces no telemetry.
+type Int64UpDownCounter struct{ embedded.Int64UpDownCounter }
+
+// Add performs no operation.
+func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record
+// float64 measurements. It produces no telemetry.
+type Float64UpDownCounter struct{ embedded.Float64UpDownCounter }
+
+// Add performs no operation.
+func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64Histogram is an OpenTelemetry Histogram used to record int64
+// measurements. It produces no telemetry.
+type Int64Histogram struct{ embedded.Int64Histogram }
+
+// Record performs no operation.
+func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {}
+
+// Float64Histogram is an OpenTelemetry Histogram used to record float64
+// measurements. It produces no telemetry.
+type Float64Histogram struct{ embedded.Float64Histogram }
+
+// Record performs no operation.
+func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {}
+
+// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record
+// int64 measurements. It produces no telemetry.
+type Int64ObservableCounter struct {
+	metric.Int64Observable
+	embedded.Int64ObservableCounter
+}
+
+// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record
+// float64 measurements. It produces no telemetry.
+type Float64ObservableCounter struct {
+	metric.Float64Observable
+	embedded.Float64ObservableCounter
+}
+
+// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record
+// int64 measurements. It produces no telemetry.
+type Int64ObservableGauge struct {
+	metric.Int64Observable
+	embedded.Int64ObservableGauge
+}
+
+// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record
+// float64 measurements. It produces no telemetry.
+type Float64ObservableGauge struct {
+	metric.Float64Observable
+	embedded.Float64ObservableGauge
+}
+
+// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
+// used to record int64 measurements. It produces no telemetry.
+type Int64ObservableUpDownCounter struct {
+	metric.Int64Observable
+	embedded.Int64ObservableUpDownCounter
+}
+
+// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
+// used to record float64 measurements. It produces no telemetry.
+type Float64ObservableUpDownCounter struct {
+	metric.Float64Observable
+	embedded.Float64ObservableUpDownCounter
+}
+
+// Int64Observer is a recorder of int64 measurements that performs no operation.
+type Int64Observer struct{ embedded.Int64Observer }
+
+// Observe performs no operation.
+func (Int64Observer) Observe(int64, ...metric.ObserveOption) {}
+
+// Float64Observer is a recorder of float64 measurements that performs no
+// operation.
+type Float64Observer struct{ embedded.Float64Observer }
+
+// Observe performs no operation.
+func (Float64Observer) Observe(float64, ...metric.ObserveOption) {}
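A short sketch of the two intended uses of this package: installing it globally to disable metrics, and embedding it so unimplemented methods default to no-op. The `otel.SetMeterProvider` global setter is assumed from the top-level `go.opentelemetry.io/otel` package:

```go
package main

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric/noop"
)

// partialMeterProvider illustrates the embedding pattern from the package
// doc: only the methods it overrides change; everything else stays no-op.
type partialMeterProvider struct {
	noop.MeterProvider
}

func main() {
	// Installing the no-op provider effectively disables metrics globally.
	otel.SetMeterProvider(noop.NewMeterProvider())
}
```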
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
new file mode 100644
index 0000000000000000000000000000000000000000..0a4825ae6a79b585a894fb5a688faa2890420dac
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
@@ -0,0 +1,185 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Float64Counter is an instrument that records increasing float64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Counter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64Counter
+
+	// Add records a change to the counter.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Add(ctx context.Context, incr float64, options ...AddOption)
+}
+
+// Float64CounterConfig contains options for synchronous counter instruments that
+// record float64 values.
+type Float64CounterConfig struct {
+	description string
+	unit        string
+}
+
+// NewFloat64CounterConfig returns a new [Float64CounterConfig] with all opts
+// applied.
+func NewFloat64CounterConfig(opts ...Float64CounterOption) Float64CounterConfig {
+	var config Float64CounterConfig
+	for _, o := range opts {
+		config = o.applyFloat64Counter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64CounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64CounterConfig) Unit() string {
+	return c.unit
+}
+
+// Float64CounterOption applies options to a [Float64CounterConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Float64CounterOption.
+type Float64CounterOption interface {
+	applyFloat64Counter(Float64CounterConfig) Float64CounterConfig
+}
+
+// Float64UpDownCounter is an instrument that records increasing or decreasing
+// float64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64UpDownCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64UpDownCounter
+
+	// Add records a change to the counter.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Add(ctx context.Context, incr float64, options ...AddOption)
+}
+
+// Float64UpDownCounterConfig contains options for synchronous counter
+// instruments that record float64 values.
+type Float64UpDownCounterConfig struct {
+	description string
+	unit        string
+}
+
+// NewFloat64UpDownCounterConfig returns a new [Float64UpDownCounterConfig]
+// with all opts applied.
+func NewFloat64UpDownCounterConfig(opts ...Float64UpDownCounterOption) Float64UpDownCounterConfig {
+	var config Float64UpDownCounterConfig
+	for _, o := range opts {
+		config = o.applyFloat64UpDownCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64UpDownCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64UpDownCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Float64UpDownCounterOption applies options to a
+// [Float64UpDownCounterConfig]. See [InstrumentOption] for other options that
+// can be used as a Float64UpDownCounterOption.
+type Float64UpDownCounterOption interface {
+	applyFloat64UpDownCounter(Float64UpDownCounterConfig) Float64UpDownCounterConfig
+}
+
+// Float64Histogram is an instrument that records a distribution of float64
+// values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Histogram interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64Histogram
+
+	// Record adds an additional value to the distribution.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Record(ctx context.Context, incr float64, options ...RecordOption)
+}
+
+// Float64HistogramConfig contains options for synchronous histogram
+// instruments that record float64 values.
+type Float64HistogramConfig struct {
+	description              string
+	unit                     string
+	explicitBucketBoundaries []float64
+}
+
+// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all
+// opts applied.
+func NewFloat64HistogramConfig(opts ...Float64HistogramOption) Float64HistogramConfig {
+	var config Float64HistogramConfig
+	for _, o := range opts {
+		config = o.applyFloat64Histogram(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64HistogramConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64HistogramConfig) Unit() string {
+	return c.unit
+}
+
+// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
+func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 {
+	return c.explicitBucketBoundaries
+}
+
+// Float64HistogramOption applies options to a [Float64HistogramConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Float64HistogramOption.
+type Float64HistogramOption interface {
+	applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig
+}
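A hedged sketch of creating and using a Float64Histogram; `metric.WithDescription`, `metric.WithUnit`, `metric.WithExplicitBucketBoundaries`, and `metric.WithAttributes` are assumed from elsewhere in this metric package, and the instrument name and boundaries are illustrative:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/noop"
)

func main() {
	meter := noop.NewMeterProvider().Meter("example")

	// Instrument-level options are accepted as Float64HistogramOptions.
	hist, err := meter.Float64Histogram(
		"http.server.duration",
		metric.WithDescription("Duration of inbound HTTP requests"),
		metric.WithUnit("s"),
		metric.WithExplicitBucketBoundaries(0.01, 0.1, 1, 10),
	)
	if err != nil {
		panic(err)
	}

	// Record one measurement with an attribute attached.
	hist.Record(context.Background(), 0.237,
		metric.WithAttributes(attribute.String("http.route", "/api")))
}
```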
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
new file mode 100644
index 0000000000000000000000000000000000000000..56667d32fc018a557b243034e5f6b3695ed253c1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
@@ -0,0 +1,185 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Int64Counter is an instrument that records increasing int64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Counter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64Counter
+
+	// Add records a change to the counter.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Add(ctx context.Context, incr int64, options ...AddOption)
+}
+
+// Int64CounterConfig contains options for synchronous counter instruments that
+// record int64 values.
+type Int64CounterConfig struct {
+	description string
+	unit        string
+}
+
+// NewInt64CounterConfig returns a new [Int64CounterConfig] with all opts
+// applied.
+func NewInt64CounterConfig(opts ...Int64CounterOption) Int64CounterConfig {
+	var config Int64CounterConfig
+	for _, o := range opts {
+		config = o.applyInt64Counter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64CounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64CounterConfig) Unit() string {
+	return c.unit
+}
+
+// Int64CounterOption applies options to an [Int64CounterConfig]. See
+// [InstrumentOption] for other options that can be used as an
+// Int64CounterOption.
+type Int64CounterOption interface {
+	applyInt64Counter(Int64CounterConfig) Int64CounterConfig
+}
+
+// Int64UpDownCounter is an instrument that records increasing or decreasing
+// int64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64UpDownCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64UpDownCounter
+
+	// Add records a change to the counter.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Add(ctx context.Context, incr int64, options ...AddOption)
+}
+
+// Int64UpDownCounterConfig contains options for synchronous counter
+// instruments that record int64 values.
+type Int64UpDownCounterConfig struct {
+	description string
+	unit        string
+}
+
+// NewInt64UpDownCounterConfig returns a new [Int64UpDownCounterConfig] with
+// all opts applied.
+func NewInt64UpDownCounterConfig(opts ...Int64UpDownCounterOption) Int64UpDownCounterConfig {
+	var config Int64UpDownCounterConfig
+	for _, o := range opts {
+		config = o.applyInt64UpDownCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64UpDownCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64UpDownCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Int64UpDownCounterOption applies options to an [Int64UpDownCounterConfig].
+// See [InstrumentOption] for other options that can be used as an
+// Int64UpDownCounterOption.
+type Int64UpDownCounterOption interface {
+	applyInt64UpDownCounter(Int64UpDownCounterConfig) Int64UpDownCounterConfig
+}
+
+// Int64Histogram is an instrument that records a distribution of int64
+// values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Histogram interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64Histogram
+
+	// Record adds an additional value to the distribution.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Record(ctx context.Context, incr int64, options ...RecordOption)
+}
+
+// Int64HistogramConfig contains options for synchronous histogram instruments
+// that record int64 values.
+type Int64HistogramConfig struct {
+	description              string
+	unit                     string
+	explicitBucketBoundaries []float64
+}
+
+// NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts
+// applied.
+func NewInt64HistogramConfig(opts ...Int64HistogramOption) Int64HistogramConfig {
+	var config Int64HistogramConfig
+	for _, o := range opts {
+		config = o.applyInt64Histogram(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64HistogramConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64HistogramConfig) Unit() string {
+	return c.unit
+}
+
+// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
+func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 {
+	return c.explicitBucketBoundaries
+}
+
+// Int64HistogramOption applies options to an [Int64HistogramConfig]. See
+// [InstrumentOption] for other options that can be used as an
+// Int64HistogramOption.
+type Int64HistogramOption interface {
+	applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig
+}
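A corresponding sketch for the int64 instruments, including how an SDK implementer would consume options via `NewInt64CounterConfig`; the option helpers are assumed from elsewhere in the metric package:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/noop"
)

func main() {
	meter := noop.NewMeterProvider().Meter("example")

	// User side: create a counter and record an increment.
	counter, err := meter.Int64Counter(
		"requests.total",
		metric.WithDescription("Total requests handled"),
		metric.WithUnit("{request}"),
	)
	if err != nil {
		panic(err)
	}
	counter.Add(context.Background(), 1)

	// Implementer side: options are consumed via the New*Config constructors.
	cfg := metric.NewInt64CounterConfig(metric.WithDescription("Total requests handled"))
	fmt.Println(cfg.Description())
}
```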
diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go
new file mode 100644
index 0000000000000000000000000000000000000000..d29aaa32c0b5b0b124b14c8b4d016fd20f09d2bf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/propagation"
+)
+
+// GetTextMapPropagator returns the global TextMapPropagator. If none has been
+// set, a No-Op TextMapPropagator is returned.
+func GetTextMapPropagator() propagation.TextMapPropagator {
+	return global.TextMapPropagator()
+}
+
+// SetTextMapPropagator sets propagator as the global TextMapPropagator.
+func SetTextMapPropagator(propagator propagation.TextMapPropagator) {
+	global.SetTextMapPropagator(propagator)
+}
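The canonical startup idiom for these helpers, combining the two propagators vendored below:

```go
package main

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
)

func main() {
	// Install a composite of W3C Trace Context and Baggage as the global
	// propagator at startup.
	otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
		propagation.TraceContext{},
		propagation.Baggage{},
	))

	_ = otel.GetTextMapPropagator() // later retrieved by instrumentation
}
```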
diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
new file mode 100644
index 0000000000000000000000000000000000000000..303cdf1cbff417b19def5800931da24b0e759d66
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/baggage"
+)
+
+const baggageHeader = "baggage"
+
+// Baggage is a propagator that supports the W3C Baggage format.
+//
+// This propagates user-defined baggage associated with a trace. The complete
+// specification is defined at https://www.w3.org/TR/baggage/.
+type Baggage struct{}
+
+var _ TextMapPropagator = Baggage{}
+
+// Inject sets baggage key-values from ctx into the carrier.
+func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
+	bStr := baggage.FromContext(ctx).String()
+	if bStr != "" {
+		carrier.Set(baggageHeader, bStr)
+	}
+}
+
+// Extract returns a copy of parent with the baggage from the carrier added.
+func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
+	bStr := carrier.Get(baggageHeader)
+	if bStr == "" {
+		return parent
+	}
+
+	bag, err := baggage.Parse(bStr)
+	if err != nil {
+		return parent
+	}
+	return baggage.ContextWithBaggage(parent, bag)
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (b Baggage) Fields() []string {
+	return []string{baggageHeader}
+}
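A round-trip sketch using the vendored `MapCarrier`; the `baggage.NewMember` and `baggage.New` constructors are assumed from the `go.opentelemetry.io/otel/baggage` package:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/baggage"
	"go.opentelemetry.io/otel/propagation"
)

func main() {
	member, _ := baggage.NewMember("tenant", "acme")
	bag, _ := baggage.New(member)
	ctx := baggage.ContextWithBaggage(context.Background(), bag)

	// Inject writes the baggage into the carrier under the "baggage" key.
	carrier := propagation.MapCarrier{}
	propagation.Baggage{}.Inject(ctx, carrier)
	fmt.Println(carrier.Get("baggage")) // tenant=acme

	// Extract parses it back; empty or invalid headers leave ctx unchanged.
	ctx2 := propagation.Baggage{}.Extract(context.Background(), carrier)
	fmt.Println(baggage.FromContext(ctx2).Len()) // 1
}
```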
diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..c119eb2858b6adedf6fb29c1334b9600ab250bf2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/doc.go
@@ -0,0 +1,24 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package propagation contains OpenTelemetry context propagators.
+
+OpenTelemetry propagators are used to extract and inject context data from and
+into messages exchanged by applications. The propagators supported by this
+package are the W3C Trace Context encoding
+(https://www.w3.org/TR/trace-context/) and W3C Baggage
+(https://www.w3.org/TR/baggage/).
+*/
+package propagation // import "go.opentelemetry.io/otel/propagation"
diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
new file mode 100644
index 0000000000000000000000000000000000000000..c94438f73a5e1bc2e3e09c07e287529e985afa08
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
@@ -0,0 +1,153 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+	"net/http"
+)
+
+// TextMapCarrier is the storage medium used by a TextMapPropagator.
+type TextMapCarrier interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Get returns the value associated with the passed key.
+	Get(key string) string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Set stores the key-value pair.
+	Set(key string, value string)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Keys lists the keys stored in this carrier.
+	Keys() []string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage
+// medium for propagated key-value pairs.
+type MapCarrier map[string]string
+
+// Compile-time check that MapCarrier implements the TextMapCarrier interface.
+var _ TextMapCarrier = MapCarrier{}
+
+// Get returns the value associated with the passed key.
+func (c MapCarrier) Get(key string) string {
+	return c[key]
+}
+
+// Set stores the key-value pair.
+func (c MapCarrier) Set(key, value string) {
+	c[key] = value
+}
+
+// Keys lists the keys stored in this carrier.
+func (c MapCarrier) Keys() []string {
+	keys := make([]string, 0, len(c))
+	for k := range c {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface.
+type HeaderCarrier http.Header
+
+// Get returns the value associated with the passed key.
+func (hc HeaderCarrier) Get(key string) string {
+	return http.Header(hc).Get(key)
+}
+
+// Set stores the key-value pair.
+func (hc HeaderCarrier) Set(key string, value string) {
+	http.Header(hc).Set(key, value)
+}
+
+// Keys lists the keys stored in this carrier.
+func (hc HeaderCarrier) Keys() []string {
+	keys := make([]string, 0, len(hc))
+	for k := range hc {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// TextMapPropagator propagates cross-cutting concerns as key-value text
+// pairs within a carrier that travels in-band across process boundaries.
+type TextMapPropagator interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Inject sets cross-cutting concerns from the Context into the carrier.
+	Inject(ctx context.Context, carrier TextMapCarrier)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Extract reads cross-cutting concerns from the carrier into a Context.
+	Extract(ctx context.Context, carrier TextMapCarrier) context.Context
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Fields returns the keys whose values are set with Inject.
+	Fields() []string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+type compositeTextMapPropagator []TextMapPropagator
+
+func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) {
+	for _, i := range p {
+		i.Inject(ctx, carrier)
+	}
+}
+
+func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+	for _, i := range p {
+		ctx = i.Extract(ctx, carrier)
+	}
+	return ctx
+}
+
+func (p compositeTextMapPropagator) Fields() []string {
+	unique := make(map[string]struct{})
+	for _, i := range p {
+		for _, k := range i.Fields() {
+			unique[k] = struct{}{}
+		}
+	}
+
+	fields := make([]string, 0, len(unique))
+	for k := range unique {
+		fields = append(fields, k)
+	}
+	return fields
+}
+
+// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the
+// group of passed TextMapPropagators. This allows different cross-cutting
+// concerns to be propagated in a unified manner.
+//
+// The returned TextMapPropagator will inject and extract cross-cutting
+// concerns in the order the TextMapPropagators were provided. Additionally,
+// the Fields method will return a de-duplicated slice of the keys that are
+// set with the Inject method.
+func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator {
+	return compositeTextMapPropagator(p)
+}
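A sketch of the carriers in practice, injecting into an outgoing `http.Request` and extracting on the server side:

```go
package main

import (
	"context"
	"net/http"

	"go.opentelemetry.io/otel/propagation"
)

func main() {
	prop := propagation.NewCompositeTextMapPropagator(
		propagation.TraceContext{}, propagation.Baggage{},
	)

	// Client side: inject context into the outgoing request headers.
	req, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)
	prop.Inject(context.Background(), propagation.HeaderCarrier(req.Header))

	// Server side: extract incoming headers into a derived context.
	handler := func(w http.ResponseWriter, r *http.Request) {
		ctx := prop.Extract(r.Context(), propagation.HeaderCarrier(r.Header))
		_ = ctx
	}
	_ = handler
}
```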
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
new file mode 100644
index 0000000000000000000000000000000000000000..63e5d62221f06cb744b5e25996787ecbf4198266
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -0,0 +1,167 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+	"encoding/hex"
+	"fmt"
+	"strings"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+const (
+	supportedVersion  = 0
+	maxVersion        = 254
+	traceparentHeader = "traceparent"
+	tracestateHeader  = "tracestate"
+	delimiter         = "-"
+)
+
+// TraceContext is a propagator that supports the W3C Trace Context format
+// (https://www.w3.org/TR/trace-context/)
+//
+// This propagator will propagate the traceparent and tracestate headers to
+// guarantee traces are not broken. It is up to the users of this propagator
+// to choose if they want to participate in a trace by modifying the
+// traceparent header and relevant parts of the tracestate header containing
+// their proprietary information.
+type TraceContext struct{}
+
+var (
+	_           TextMapPropagator = TraceContext{}
+	versionPart                   = fmt.Sprintf("%.2X", supportedVersion)
+)
+
+// Inject sets the tracecontext from the Context into the carrier.
+func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
+	sc := trace.SpanContextFromContext(ctx)
+	if !sc.IsValid() {
+		return
+	}
+
+	if ts := sc.TraceState().String(); ts != "" {
+		carrier.Set(tracestateHeader, ts)
+	}
+
+	// Clear all flags other than the trace-context supported sampling bit.
+	flags := sc.TraceFlags() & trace.FlagsSampled
+
+	var sb strings.Builder
+	sb.Grow(2 + 32 + 16 + 2 + 3)
+	_, _ = sb.WriteString(versionPart)
+	traceID := sc.TraceID()
+	spanID := sc.SpanID()
+	flagByte := [1]byte{byte(flags)}
+	var buf [32]byte
+	for _, src := range [][]byte{traceID[:], spanID[:], flagByte[:]} {
+		_ = sb.WriteByte(delimiter[0])
+		n := hex.Encode(buf[:], src)
+		_, _ = sb.Write(buf[:n])
+	}
+	carrier.Set(traceparentHeader, sb.String())
+}
+
+// Extract reads tracecontext from the carrier into a returned Context.
+//
+// The returned Context will be a copy of ctx and contain the extracted
+// tracecontext as the remote SpanContext. If the extracted tracecontext is
+// invalid, the passed ctx will be returned directly instead.
+func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+	sc := tc.extract(carrier)
+	if !sc.IsValid() {
+		return ctx
+	}
+	return trace.ContextWithRemoteSpanContext(ctx, sc)
+}
+
+func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
+	h := carrier.Get(traceparentHeader)
+	if h == "" {
+		return trace.SpanContext{}
+	}
+
+	var ver [1]byte
+	if !extractPart(ver[:], &h, 2) {
+		return trace.SpanContext{}
+	}
+	version := int(ver[0])
+	if version > maxVersion {
+		return trace.SpanContext{}
+	}
+
+	var scc trace.SpanContextConfig
+	if !extractPart(scc.TraceID[:], &h, 32) {
+		return trace.SpanContext{}
+	}
+	if !extractPart(scc.SpanID[:], &h, 16) {
+		return trace.SpanContext{}
+	}
+
+	var opts [1]byte
+	if !extractPart(opts[:], &h, 2) {
+		return trace.SpanContext{}
+	}
+	if version == 0 && (h != "" || opts[0] > 2) {
+		// Version 0 does not allow trailing data after the flags field,
+		// and it does not allow undefined flag values (> 2).
+		return trace.SpanContext{}
+	}
+
+	// Clear all flags other than the trace-context supported sampling bit.
+	scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
+
+	// Ignore the error returned here. Failure to parse tracestate MUST NOT
+	// affect the parsing of traceparent according to the W3C tracecontext
+	// specification.
+	scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader))
+	scc.Remote = true
+
+	sc := trace.NewSpanContext(scc)
+	if !sc.IsValid() {
+		return trace.SpanContext{}
+	}
+
+	return sc
+}
+
+// upperHex reports whether v contains upper-case hexadecimal characters.
+func upperHex(v string) bool {
+	for _, c := range v {
+		if c >= 'A' && c <= 'F' {
+			return true
+		}
+	}
+	return false
+}
+
+func extractPart(dst []byte, h *string, n int) bool {
+	part, left, _ := strings.Cut(*h, delimiter)
+	*h = left
+	// hex.Decode accepts upper-case characters, which the W3C specification
+	// does not allow, so reject them explicitly.
+	if len(part) != n || upperHex(part) {
+		return false
+	}
+	if p, err := hex.Decode(dst, []byte(part)); err != nil || p != n/2 {
+		return false
+	}
+	return true
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (tc TraceContext) Fields() []string {
+	return []string{traceparentHeader, tracestateHeader}
+}
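A sketch of extraction, using the example `traceparent` value from the W3C specification:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/propagation"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	// traceparent value taken from the W3C Trace Context specification.
	carrier := propagation.MapCarrier{
		"traceparent": "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01",
	}

	ctx := propagation.TraceContext{}.Extract(context.Background(), carrier)
	sc := trace.SpanContextFromContext(ctx)
	fmt.Println(sc.TraceID(), sc.IsSampled(), sc.IsRemote()) // trace ID, true, true
}
```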
diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e0a43e13840e4c92a47259ab616f858adfbdbe3f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/requirements.txt
@@ -0,0 +1 @@
+codespell==2.2.6
diff --git a/vendor/go.opentelemetry.io/otel/sdk/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..6e923acab43286fa8d77222573646055daa2331a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go
@@ -0,0 +1,24 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package instrumentation provides types to represent the code libraries that
+// provide OpenTelemetry instrumentation. These types are used in the
+// OpenTelemetry signal pipelines to identify the source of telemetry.
+//
+// See
+// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md
+// and
+// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md
+// for more information.
+package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
new file mode 100644
index 0000000000000000000000000000000000000000..39f025a1715bfe2cca0edc01760eee28599bae4b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
@@ -0,0 +1,19 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
+
+// Library represents the instrumentation library.
+//
+// Deprecated: please use Scope instead.
+type Library = Scope
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
new file mode 100644
index 0000000000000000000000000000000000000000..09c6d93f6d09563d76ccbfdf05d12701a2b3b17d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
@@ -0,0 +1,26 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
+
+// Scope represents the instrumentation scope.
+type Scope struct {
+	// Name is the name of the instrumentation scope. This should be the
+	// Go package name of that scope.
+	Name string
+	// Version is the version of the instrumentation scope.
+	Version string
+	// SchemaURL of the telemetry emitted by the scope.
+	SchemaURL string
+}
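+
+// A minimal usage sketch: a Scope is a plain value, typically constructed as
+// a literal. The name, version, and schema URL below are illustrative only.
+//
+//	scope := instrumentation.Scope{
+//		Name:      "go.opentelemetry.io/otel/example",
+//		Version:   "v0.1.0",
+//		SchemaURL: "https://opentelemetry.io/schemas/1.21.0",
+//	}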
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go
new file mode 100644
index 0000000000000000000000000000000000000000..59dcfab25013e6d58da80613112ad93bfdaecc30
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go
@@ -0,0 +1,177 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package env // import "go.opentelemetry.io/otel/sdk/internal/env"
+
+import (
+	"os"
+	"strconv"
+
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+// Environment variable names.
+const (
+	// BatchSpanProcessorScheduleDelayKey is the delay interval between two
+	// consecutive exports (e.g. 5000).
+	BatchSpanProcessorScheduleDelayKey = "OTEL_BSP_SCHEDULE_DELAY"
+	// BatchSpanProcessorExportTimeoutKey is the maximum allowed time to
+	// export data (e.g. 3000).
+	BatchSpanProcessorExportTimeoutKey = "OTEL_BSP_EXPORT_TIMEOUT"
+	// BatchSpanProcessorMaxQueueSizeKey is the maximum queue size (e.g. 2048).
+	BatchSpanProcessorMaxQueueSizeKey = "OTEL_BSP_MAX_QUEUE_SIZE"
+	// BatchSpanProcessorMaxExportBatchSizeKey is the maximum batch size (e.g.
+	// 512). Note: it must be less than or equal to
+	// BatchSpanProcessorMaxQueueSizeKey.
+	BatchSpanProcessorMaxExportBatchSizeKey = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE"
+
+	// AttributeValueLengthKey is the maximum allowed attribute value size.
+	AttributeValueLengthKey = "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT"
+
+	// AttributeCountKey is the maximum allowed span attribute count.
+	AttributeCountKey = "OTEL_ATTRIBUTE_COUNT_LIMIT"
+
+	// SpanAttributeValueLengthKey is the maximum allowed attribute value size
+	// for a span.
+	SpanAttributeValueLengthKey = "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT"
+
+	// SpanAttributeCountKey is the maximum allowed span attribute count for a
+	// span.
+	SpanAttributeCountKey = "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT"
+
+	// SpanEventCountKey is the maximum allowed span event count.
+	SpanEventCountKey = "OTEL_SPAN_EVENT_COUNT_LIMIT"
+
+	// SpanEventAttributeCountKey is the maximum allowed attribute per span
+	// event count.
+	SpanEventAttributeCountKey = "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"
+
+	// SpanLinkCountKey is the maximum allowed span link count.
+	SpanLinkCountKey = "OTEL_SPAN_LINK_COUNT_LIMIT"
+
+	// SpanLinkAttributeCountKey is the maximum allowed attribute per span
+	// link count.
+	SpanLinkAttributeCountKey = "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"
+)
+
+// firstInt returns the value of the first matching environment variable from
+// keys. If the value is not an integer or no match is found, defaultValue is
+// returned.
+func firstInt(defaultValue int, keys ...string) int {
+	for _, key := range keys {
+		value := os.Getenv(key)
+		if value == "" {
+			continue
+		}
+
+		intValue, err := strconv.Atoi(value)
+		if err != nil {
+			global.Info("Got invalid value, number value expected.", key, value)
+			return defaultValue
+		}
+
+		return intValue
+	}
+
+	return defaultValue
+}
+
+// IntEnvOr returns the int value of the environment variable with name key
+// if it exists, is not empty, and holds an int value. Otherwise, defaultValue
+// is returned.
+func IntEnvOr(key string, defaultValue int) int {
+	value := os.Getenv(key)
+	if value == "" {
+		return defaultValue
+	}
+
+	intValue, err := strconv.Atoi(value)
+	if err != nil {
+		global.Info("Got invalid value, number value expected.", key, value)
+		return defaultValue
+	}
+
+	return intValue
+}
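+
+// A usage sketch for IntEnvOr from within this package; the values shown are
+// illustrative only.
+//
+//	os.Setenv("OTEL_BSP_MAX_QUEUE_SIZE", "4096")
+//	size := IntEnvOr(BatchSpanProcessorMaxQueueSizeKey, 2048) // 4096
+//	// An unset or non-integer value falls back to the default:
+//	os.Unsetenv("OTEL_BSP_MAX_QUEUE_SIZE")
+//	size = IntEnvOr(BatchSpanProcessorMaxQueueSizeKey, 2048) // 2048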
+
+// BatchSpanProcessorScheduleDelay returns the environment variable value for
+// the OTEL_BSP_SCHEDULE_DELAY key if it exists, otherwise defaultValue is
+// returned.
+func BatchSpanProcessorScheduleDelay(defaultValue int) int {
+	return IntEnvOr(BatchSpanProcessorScheduleDelayKey, defaultValue)
+}
+
+// BatchSpanProcessorExportTimeout returns the environment variable value for
+// the OTEL_BSP_EXPORT_TIMEOUT key if it exists, otherwise defaultValue is
+// returned.
+func BatchSpanProcessorExportTimeout(defaultValue int) int {
+	return IntEnvOr(BatchSpanProcessorExportTimeoutKey, defaultValue)
+}
+
+// BatchSpanProcessorMaxQueueSize returns the environment variable value for
+// the OTEL_BSP_MAX_QUEUE_SIZE key if it exists, otherwise defaultValue is
+// returned.
+func BatchSpanProcessorMaxQueueSize(defaultValue int) int {
+	return IntEnvOr(BatchSpanProcessorMaxQueueSizeKey, defaultValue)
+}
+
+// BatchSpanProcessorMaxExportBatchSize returns the environment variable value for
+// the OTEL_BSP_MAX_EXPORT_BATCH_SIZE key if it exists, otherwise defaultValue
+// is returned.
+func BatchSpanProcessorMaxExportBatchSize(defaultValue int) int {
+	return IntEnvOr(BatchSpanProcessorMaxExportBatchSizeKey, defaultValue)
+}
+
+// SpanAttributeValueLength returns the environment variable value for the
+// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
+// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT is
+// returned or defaultValue if that is not set.
+func SpanAttributeValueLength(defaultValue int) int {
+	return firstInt(defaultValue, SpanAttributeValueLengthKey, AttributeValueLengthKey)
+}
+
+// SpanAttributeCount returns the environment variable value for the
+// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the
+// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT is returned or
+// defaultValue if that is not set.
+func SpanAttributeCount(defaultValue int) int {
+	return firstInt(defaultValue, SpanAttributeCountKey, AttributeCountKey)
+}
+
+// SpanEventCount returns the environment variable value for the
+// OTEL_SPAN_EVENT_COUNT_LIMIT key if it exists, otherwise defaultValue is
+// returned.
+func SpanEventCount(defaultValue int) int {
+	return IntEnvOr(SpanEventCountKey, defaultValue)
+}
+
+// SpanEventAttributeCount returns the environment variable value for the
+// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue
+// is returned.
+func SpanEventAttributeCount(defaultValue int) int {
+	return IntEnvOr(SpanEventAttributeCountKey, defaultValue)
+}
+
+// SpanLinkCount returns the environment variable value for the
+// OTEL_SPAN_LINK_COUNT_LIMIT key if it exists, otherwise defaultValue is
+// returned.
+func SpanLinkCount(defaultValue int) int {
+	return IntEnvOr(SpanLinkCountKey, defaultValue)
+}
+
+// SpanLinkAttributeCount returns the environment variable value for the
+// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue is
+// returned.
+func SpanLinkAttributeCount(defaultValue int) int {
+	return IntEnvOr(SpanLinkAttributeCountKey, defaultValue)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go b/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go
new file mode 100644
index 0000000000000000000000000000000000000000..bd84f624b45da713667fea22c3e659f9f51d271f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/sdk/internal"
+
+//go:generate gotmpl --body=../../internal/shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go
+//go:generate gotmpl --body=../../internal/shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go
+//go:generate gotmpl --body=../../internal/shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go
+
+//go:generate gotmpl --body=../../internal/shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/sdk/internal/matchers\"}" --out=internaltest/harness.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go b/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go
new file mode 100644
index 0000000000000000000000000000000000000000..dfeaaa8ca04426201cd55de2cbab98da0d340071
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/sdk/internal"
+
+import "time"
+
+// MonotonicEndTime returns the end time at present
+// but offset from start, monotonically.
+//
+// The monotonic clock is used in subtractions, so the duration
+// since start, added back to start, gives end as a
+// monotonic time.
+// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks
+func MonotonicEndTime(start time.Time) time.Time {
+	return start.Add(time.Since(start))
+}
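+
+// A usage sketch: computing an end time that is safe against wall-clock
+// adjustments between start and end (the workload is illustrative only).
+//
+//	start := time.Now()
+//	// ... do work ...
+//	end := MonotonicEndTime(start)
+//	elapsed := end.Sub(start) // uses the monotonic clock reading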
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/EXPERIMENTAL.md b/vendor/go.opentelemetry.io/otel/sdk/metric/EXPERIMENTAL.md
new file mode 100644
index 0000000000000000000000000000000000000000..658d20276878f0f9508de0e3379e83ccbec7a6d3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/EXPERIMENTAL.md
@@ -0,0 +1,111 @@
+# Experimental Features
+
+The metric SDK contains features that have not yet stabilized in the OpenTelemetry specification.
+These features are added to the OpenTelemetry Go metric SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
+
+These features may change in backwards incompatible ways as feedback is applied.
+See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
+
+## Features
+
+- [Cardinality Limit](#cardinality-limit)
+- [Exemplars](#exemplars)
+
+### Cardinality Limit
+
+The cardinality limit is the hard limit on the number of metric streams that can be collected for a single instrument.
+
+This experimental feature can be enabled by setting the `OTEL_GO_X_CARDINALITY_LIMIT` environment variable.
+The value must be an integer.
+All other values are ignored.
+
+If the value set is less than or equal to `0`, no limit will be applied.
+
+#### Examples
+
+Set the cardinality limit to 2000.
+
+```console
+export OTEL_GO_X_CARDINALITY_LIMIT=2000
+```
+
+Set an infinite cardinality limit (functionally equivalent to disabling the feature).
+
+```console
+export OTEL_GO_X_CARDINALITY_LIMIT=-1
+```
+
+Disable the cardinality limit.
+
+```console
+unset OTEL_GO_X_CARDINALITY_LIMIT
+```
+
+### Exemplars
+
+A sample of measurements made may be exported directly as a set of exemplars.
+
+This experimental feature can be enabled by setting the `OTEL_GO_X_EXEMPLAR` environment variable.
+The value must be the case-insensitive string `"true"` to enable the feature.
+All other values are ignored.
+
+Exemplar filters are supported.
+The exemplar filter applies to all measurements made.
+It filters these measurements, only allowing certain measurements to be passed to the underlying exemplar reservoir.
+
+To change the exemplar filter from the default `"trace_based"` filter, set the `OTEL_METRICS_EXEMPLAR_FILTER` environment variable.
+The value must be the case-sensitive string defined by the [OpenTelemetry specification].
+
+- `"always_on"`: allows all measurements
+- `"always_off"`: denies all measurements
+- `"trace_based"`: allows only sampled measurements
+
+All values other than these will result in the default, `"trace_based"`, exemplar filter being used.
+
+[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification/blob/a6ca2fd484c9e76fe1d8e1c79c99f08f4745b5ee/specification/configuration/sdk-environment-variables.md#exemplar
+
+#### Examples
+
+Enable exemplars to be exported.
+
+```console
+export OTEL_GO_X_EXEMPLAR=true
+```
+
+Disable exemplars from being exported.
+
+```console
+unset OTEL_GO_X_EXEMPLAR
+```
+
+Set the exemplar filter to allow all measurements.
+
+```console
+export OTEL_METRICS_EXEMPLAR_FILTER=always_on
+```
+
+Set the exemplar filter to deny all measurements.
+
+```console
+export OTEL_METRICS_EXEMPLAR_FILTER=always_off
+```
+
+Set the exemplar filter to only allow sampled measurements.
+
+```console
+export OTEL_METRICS_EXEMPLAR_FILTER=trace_based
+```
+
+Revert to the default exemplar filter (`"trace_based"`).
+
+```console
+unset OTEL_METRICS_EXEMPLAR_FILTER
+```
+
+## Compatibility and Stability
+
+Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../VERSIONING.md).
+These features may be removed or modified in successive version releases, including patch versions.
+
+When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
+There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
+If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go b/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go
new file mode 100644
index 0000000000000000000000000000000000000000..faddbb0b61b19039a3f8bb06ec5ab8640cd3d516
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go
@@ -0,0 +1,201 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"errors"
+	"fmt"
+)
+
+// errAgg is wrapped by misconfigured aggregations.
+var errAgg = errors.New("aggregation")
+
+// Aggregation is the aggregation used to summarize recorded measurements.
+type Aggregation interface {
+	// copy returns a deep copy of the Aggregation.
+	copy() Aggregation
+
+	// err returns an error for any misconfigured Aggregation.
+	err() error
+}
+
+// AggregationDrop is an Aggregation that drops all recorded data.
+type AggregationDrop struct{} // AggregationDrop has no parameters.
+
+var _ Aggregation = AggregationDrop{}
+
+// copy returns a deep copy of d.
+func (d AggregationDrop) copy() Aggregation { return d }
+
+// err returns an error for any misconfiguration. A drop aggregation has no
+// parameters and cannot be misconfigured, therefore this always returns nil.
+func (AggregationDrop) err() error { return nil }
+
+// AggregationDefault is an Aggregation that uses the default instrument kind selection
+// mapping to select another Aggregation. A metric reader can be configured to
+// make an aggregation selection based on instrument kind that differs from
+// the default. This Aggregation ensures the default is used.
+//
+// See the [DefaultAggregationSelector] for information about the default
+// instrument kind selection mapping.
+type AggregationDefault struct{} // AggregationDefault has no parameters.
+
+var _ Aggregation = AggregationDefault{}
+
+// copy returns a deep copy of d.
+func (d AggregationDefault) copy() Aggregation { return d }
+
+// err returns an error for any misconfiguration. A default aggregation has no
+// parameters and cannot be misconfigured, therefore this always returns nil.
+func (AggregationDefault) err() error { return nil }
+
+// AggregationSum is an Aggregation that summarizes a set of measurements as their
+// arithmetic sum.
+type AggregationSum struct{} // AggregationSum has no parameters.
+
+var _ Aggregation = AggregationSum{}
+
+// copy returns a deep copy of s.
+func (s AggregationSum) copy() Aggregation { return s }
+
+// err returns an error for any misconfiguration. A sum aggregation has no
+// parameters and cannot be misconfigured, therefore this always returns nil.
+func (AggregationSum) err() error { return nil }
+
+// AggregationLastValue is an Aggregation that summarizes a set of measurements as the
+// last one made.
+type AggregationLastValue struct{} // AggregationLastValue has no parameters.
+
+var _ Aggregation = AggregationLastValue{}
+
+// copy returns a deep copy of l.
+func (l AggregationLastValue) copy() Aggregation { return l }
+
+// err returns an error for any misconfiguration. A last-value aggregation has
+// no parameters and cannot be misconfigured, therefore this always returns
+// nil.
+func (AggregationLastValue) err() error { return nil }
+
+// AggregationExplicitBucketHistogram is an Aggregation that summarizes a set of
+// measurements as a histogram with explicitly defined buckets.
+type AggregationExplicitBucketHistogram struct {
+	// Boundaries are the increasing bucket boundary values. Boundary values
+	// define bucket upper bounds. Buckets are exclusive of their lower
+	// boundary and inclusive of their upper bound (except at positive
+	// infinity). A measurement is defined to fall into the lowest-numbered
+	// bucket whose upper boundary is greater than or equal to the
+	// measurement. As an example, boundaries defined as:
+	//
+	// []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000}
+	//
+	// Will define these buckets:
+	//
+	// (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, 25.0], (25.0, 50.0],
+	// (50.0, 75.0], (75.0, 100.0], (100.0, 250.0], (250.0, 500.0],
+	// (500.0, 1000.0], (1000.0, +∞)
+	Boundaries []float64
+	// NoMinMax indicates whether to not record the min and max of the
+	// distribution. By default, these extrema are recorded.
+	//
+	// Recording these extrema for cumulative data is expected to have little
+	// value, they will represent the entire life of the instrument instead of
+	// just the current collection cycle. It is recommended to set this to true
+	// for that type of data to avoid computing the low-value extrema.
+	NoMinMax bool
+}
+
+var _ Aggregation = AggregationExplicitBucketHistogram{}
+
+// errHist is returned by misconfigured ExplicitBucketHistograms.
+var errHist = fmt.Errorf("%w: explicit bucket histogram", errAgg)
+
+// err returns an error for any misconfiguration.
+func (h AggregationExplicitBucketHistogram) err() error {
+	if len(h.Boundaries) <= 1 {
+		return nil
+	}
+
+	// Check boundaries are monotonic.
+	i := h.Boundaries[0]
+	for _, j := range h.Boundaries[1:] {
+		if i >= j {
+			return fmt.Errorf("%w: non-monotonic boundaries: %v", errHist, h.Boundaries)
+		}
+		i = j
+	}
+
+	return nil
+}
+
+// copy returns a deep copy of h.
+func (h AggregationExplicitBucketHistogram) copy() Aggregation {
+	b := make([]float64, len(h.Boundaries))
+	copy(b, h.Boundaries)
+	return AggregationExplicitBucketHistogram{
+		Boundaries: b,
+		NoMinMax:   h.NoMinMax,
+	}
+}
+
+// AggregationBase2ExponentialHistogram is an Aggregation that summarizes a set of
+// measurements as a histogram with bucket widths that grow exponentially.
+type AggregationBase2ExponentialHistogram struct {
+	// MaxSize is the maximum number of buckets to use for the histogram.
+	MaxSize int32
+	// MaxScale is the maximum resolution scale to use for the histogram.
+	//
+	// MaxScale has a maximum value of 20. Using a value of 20 means the
+	// maximum number of buckets that can fit within the range of a
+	// signed 32-bit integer index could be used.
+	//
+	// MaxScale has a minimum value of -10. Using a value of -10 means only
+	// two buckets will be used.
+	MaxScale int32
+
+	// NoMinMax indicates whether to not record the min and max of the
+	// distribution. By default, these extrema are recorded.
+	//
+	// Recording these extrema for cumulative data is expected to have little
+	// value, they will represent the entire life of the instrument instead of
+	// just the current collection cycle. It is recommended to set this to true
+	// for that type of data to avoid computing the low-value extrema.
+	NoMinMax bool
+}
+
+var _ Aggregation = AggregationBase2ExponentialHistogram{}
+
+// copy returns a deep copy of the Aggregation.
+func (e AggregationBase2ExponentialHistogram) copy() Aggregation {
+	return e
+}
+
+const (
+	expoMaxScale = 20
+	expoMinScale = -10
+)
+
+// errExpoHist is returned by misconfigured Base2ExponentialBucketHistograms.
+var errExpoHist = fmt.Errorf("%w: exponential histogram", errAgg)
+
+// err returns an error for any misconfigured Aggregation.
+func (e AggregationBase2ExponentialHistogram) err() error {
+	if e.MaxScale > expoMaxScale {
+		return fmt.Errorf("%w: max scale %d is greater than maximum scale %d", errExpoHist, e.MaxScale, expoMaxScale)
+	}
+	if e.MaxScale < expoMinScale {
+		return fmt.Errorf("%w: max scale %d is less than minimum scale %d", errExpoHist, e.MaxScale, expoMinScale)
+	}
+	if e.MaxSize <= 0 {
+		return fmt.Errorf("%w: max size %d is less than or equal to zero", errExpoHist, e.MaxSize)
+	}
+	return nil
+}
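+
+// A configuration sketch (values illustrative only): err validates the scale
+// and size limits documented on the struct fields above.
+//
+//	agg := AggregationBase2ExponentialHistogram{MaxSize: 160, MaxScale: 20}
+//	_ = agg.err() // nil: within the allowed limits
+//	bad := AggregationBase2ExponentialHistogram{MaxSize: 0, MaxScale: 20}
+//	_ = bad.err() // non-nil: max size must be positive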
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go b/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go
new file mode 100644
index 0000000000000000000000000000000000000000..e9c0b38d0bcc4e7078aa6980bd18282564f55aac
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go
@@ -0,0 +1,94 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"sync"
+)
+
+// cache is a locking storage used to quickly return already computed values.
+//
+// The zero value of a cache is empty and ready to use.
+//
+// A cache must not be copied after first use.
+//
+// All methods of a cache are safe to call concurrently.
+type cache[K comparable, V any] struct {
+	sync.Mutex
+	data map[K]V
+}
+
+// Lookup returns the value stored in the cache with the associated key if it
+// exists. Otherwise, f is called and its returned value is set in the cache
+// for key and returned.
+//
+// Lookup is safe to call concurrently. It will hold the cache lock, so f
+// should not block excessively.
+func (c *cache[K, V]) Lookup(key K, f func() V) V {
+	c.Lock()
+	defer c.Unlock()
+
+	if c.data == nil {
+		val := f()
+		c.data = map[K]V{key: val}
+		return val
+	}
+	if v, ok := c.data[key]; ok {
+		return v
+	}
+	val := f()
+	c.data[key] = val
+	return val
+}
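+
+// A usage sketch for Lookup, assuming an in-package caller; the key and the
+// constructor functions below are illustrative only.
+//
+//	var c cache[string, int]
+//	v := c.Lookup("answer", func() int { return 42 }) // computes: 42
+//	v = c.Lookup("answer", func() int { return 0 })   // cached:   42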
+
+// HasKey returns true if Lookup has previously been called with that key.
+//
+// HasKey is safe to call concurrently.
+func (c *cache[K, V]) HasKey(key K) bool {
+	c.Lock()
+	defer c.Unlock()
+	_, ok := c.data[key]
+	return ok
+}
+
+// cacheWithErr is a locking storage used to quickly return already computed values and an error.
+//
+// The zero value of a cacheWithErr is empty and ready to use.
+//
+// A cacheWithErr must not be copied after first use.
+//
+// All methods of a cacheWithErr are safe to call concurrently.
+type cacheWithErr[K comparable, V any] struct {
+	cache[K, valAndErr[V]]
+}
+
+type valAndErr[V any] struct {
+	val V
+	err error
+}
+
+// Lookup returns the value stored in the cacheWithErr with the associated key
+// if it exists. Otherwise, f is called and its returned value is set in the
+// cacheWithErr for key and returned.
+//
+// Lookup is safe to call concurrently. It will hold the cacheWithErr lock, so f
+// should not block excessively.
+func (c *cacheWithErr[K, V]) Lookup(key K, f func() (V, error)) (V, error) {
+	combined := c.cache.Lookup(key, func() valAndErr[V] {
+		val, err := f()
+		return valAndErr[V]{val: val, err: err}
+	})
+	return combined.val, combined.err
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
new file mode 100644
index 0000000000000000000000000000000000000000..0b191128494234715d2f28b5213777513354cd25
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
@@ -0,0 +1,148 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"context"
+	"fmt"
+	"sync"
+
+	"go.opentelemetry.io/otel/sdk/resource"
+)
+
+// config contains configuration options for a MeterProvider.
+type config struct {
+	res     *resource.Resource
+	readers []Reader
+	views   []View
+}
+
+// readerSignals returns force-flush and shutdown functions for a
+// MeterProvider to call for its respective operations. The force-flush and
+// shutdown methods of all the Readers c contains are unified into the single
+// functions returned.
+func (c config) readerSignals() (forceFlush, shutdown func(context.Context) error) {
+	var fFuncs, sFuncs []func(context.Context) error
+	for _, r := range c.readers {
+		sFuncs = append(sFuncs, r.Shutdown)
+		if f, ok := r.(interface{ ForceFlush(context.Context) error }); ok {
+			fFuncs = append(fFuncs, f.ForceFlush)
+		}
+	}
+
+	return unify(fFuncs), unifyShutdown(sFuncs)
+}
+
+// unify unifies calling all of funcs into a single function call. All errors
+// returned from calls to funcs will be unified into a single error return
+// value.
+func unify(funcs []func(context.Context) error) func(context.Context) error {
+	return func(ctx context.Context) error {
+		var errs []error
+		for _, f := range funcs {
+			if err := f(ctx); err != nil {
+				errs = append(errs, err)
+			}
+		}
+		return unifyErrors(errs)
+	}
+}
+
+// unifyErrors combines multiple errors into a single error.
+func unifyErrors(errs []error) error {
+	switch len(errs) {
+	case 0:
+		return nil
+	case 1:
+		return errs[0]
+	default:
+		return fmt.Errorf("%v", errs)
+	}
+}
+
+// unifyShutdown unifies calling all of funcs once for a shutdown. If called
+// more than once, an ErrReaderShutdown error is returned.
+func unifyShutdown(funcs []func(context.Context) error) func(context.Context) error {
+	f := unify(funcs)
+	var once sync.Once
+	return func(ctx context.Context) error {
+		err := ErrReaderShutdown
+		once.Do(func() { err = f(ctx) })
+		return err
+	}
+}
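+
+// A sketch of the once-only semantics of unifyShutdown (the variable names
+// are illustrative only): the first call runs the unified functions, every
+// later call reports ErrReaderShutdown without invoking them again.
+//
+//	shutdown := unifyShutdown(sFuncs)
+//	err := shutdown(ctx) // runs all funcs, returns their unified error
+//	err = shutdown(ctx)  // returns ErrReaderShutdown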
+
+// newConfig returns a config configured with options.
+func newConfig(options []Option) config {
+	conf := config{res: resource.Default()}
+	for _, o := range options {
+		conf = o.apply(conf)
+	}
+	return conf
+}
+
+// Option applies a configuration option value to a MeterProvider.
+type Option interface {
+	apply(config) config
+}
+
+// optionFunc applies a set of options to a config.
+type optionFunc func(config) config
+
+// apply returns a config with option(s) applied.
+func (o optionFunc) apply(conf config) config {
+	return o(conf)
+}
+
+// WithResource associates a Resource with a MeterProvider. This Resource
+// represents the entity producing telemetry and is associated with all Meters
+// the MeterProvider will create.
+//
+// By default, if this Option is not used, the default Resource from the
+// go.opentelemetry.io/otel/sdk/resource package will be used.
+func WithResource(res *resource.Resource) Option {
+	return optionFunc(func(conf config) config {
+		conf.res = res
+		return conf
+	})
+}
+
+// WithReader associates Reader r with a MeterProvider.
+//
+// By default, if this option is not used, the MeterProvider will perform no
+// operations; no data will be exported without a Reader.
+func WithReader(r Reader) Option {
+	return optionFunc(func(cfg config) config {
+		if r == nil {
+			return cfg
+		}
+		cfg.readers = append(cfg.readers, r)
+		return cfg
+	})
+}
+
+// WithView associates views with a MeterProvider.
+//
+// Views are appended to existing ones in a MeterProvider if this option is
+// used multiple times.
+//
+// By default, if this option is not used, the MeterProvider will use the
+// default view.
+func WithView(views ...View) Option {
+	return optionFunc(func(cfg config) config {
+		cfg.views = append(cfg.views, views...)
+		return cfg
+	})
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..53f80c42893d804549d745db55db08bb86c4bb34
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
@@ -0,0 +1,47 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package metric provides an implementation of the OpenTelemetry metrics SDK.
+//
+// See https://opentelemetry.io/docs/concepts/signals/metrics/ for information
+// about the concept of OpenTelemetry metrics and
+// https://opentelemetry.io/docs/concepts/components/ for more information
+// about OpenTelemetry SDKs.
+//
+// The entry point for the metric package is the MeterProvider. It is the
+// object that all API calls use to create Meters, instruments, and ultimately
+// make metric measurements. It is also the object used to control the
+// life-cycle (start, flush, and shutdown) of the SDK.
+//
+// A MeterProvider needs to be configured to export the measured data; this is
+// done by configuring it with a Reader implementation (using the WithReader
+// Option). Readers take two forms: ones that push to an endpoint
+// (NewPeriodicReader), and ones that an endpoint pulls from. See
+// [go.opentelemetry.io/otel/exporters] for exporters that can be used as
+// or with these Readers.
+//
+// Each Reader, when registered with the MeterProvider, can be augmented with a
+// View. Views allow users that run OpenTelemetry instrumented code to modify
+// the generated data of that instrumentation.
+//
+// The data generated by a MeterProvider needs to include information about its
+// origin. A MeterProvider needs to be configured with a Resource, using the
+// WithResource Option, to include this information. This Resource
+// should be used to describe the unique runtime environment instrumented code
+// is being run on. That way, when multiple instances of the code are collected
+// at a single endpoint, their origin is decipherable.
+//
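+// A minimal setup sketch tying these pieces together (someExporter is a
+// placeholder for any Exporter implementation; it is not defined by this
+// package):
+//
+//	reader := metric.NewPeriodicReader(someExporter)
+//	provider := metric.NewMeterProvider(
+//		metric.WithResource(resource.Default()),
+//		metric.WithReader(reader),
+//	)
+//	defer provider.Shutdown(context.Background())
+//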
+// See [go.opentelemetry.io/otel/metric] for more information about
+// the metric API.
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/env.go b/vendor/go.opentelemetry.io/otel/sdk/metric/env.go
new file mode 100644
index 0000000000000000000000000000000000000000..940ba8159428e4e2639235084e4e3db515d6c503
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/env.go
@@ -0,0 +1,50 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"os"
+	"strconv"
+	"time"
+
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+// Environment variable names.
+const (
+	// The time interval (in milliseconds) between the start of two export attempts.
+	envInterval = "OTEL_METRIC_EXPORT_INTERVAL"
+	// Maximum allowed time (in milliseconds) to export data.
+	envTimeout = "OTEL_METRIC_EXPORT_TIMEOUT"
+)
+
+// envDuration returns an environment variable's value as a duration in
+// milliseconds if it exists, or defaultValue if the environment variable is
+// not defined or the value is not valid.
+func envDuration(key string, defaultValue time.Duration) time.Duration {
+	v := os.Getenv(key)
+	if v == "" {
+		return defaultValue
+	}
+	d, err := strconv.Atoi(v)
+	if err != nil {
+		global.Error(err, "parse duration", "environment variable", key, "value", v)
+		return defaultValue
+	}
+	if d <= 0 {
+		global.Error(errNonPositiveDuration, "non-positive duration", "environment variable", key, "value", v)
+		return defaultValue
+	}
+	return time.Duration(d) * time.Millisecond
+}
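+
+// A usage sketch for envDuration (values illustrative only):
+//
+//	os.Setenv("OTEL_METRIC_EXPORT_INTERVAL", "30000")
+//	d := envDuration(envInterval, 60*time.Second) // 30s
+//	// Unset, non-integer, or non-positive values yield the default:
+//	os.Setenv("OTEL_METRIC_EXPORT_INTERVAL", "-1")
+//	d = envDuration(envInterval, 60*time.Second) // 1m0s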
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go
new file mode 100644
index 0000000000000000000000000000000000000000..77579acdb40b40317531f8c3ecba57a9ea128944
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go
@@ -0,0 +1,96 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"os"
+	"runtime"
+
+	"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+	"go.opentelemetry.io/otel/sdk/metric/internal/x"
+)
+
+// reservoirFunc returns the appropriately configured exemplar reservoir
+// creation func based on the passed InstrumentKind and user defined
+// environment variables.
+//
+// Note: This will only return non-nil values when the experimental exemplar
+// feature is enabled. When the OTEL_METRICS_EXEMPLAR_FILTER environment
+// variable is set to always_off, a drop reservoir is returned.
+func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.Reservoir[N] {
+	if !x.Exemplars.Enabled() {
+		return nil
+	}
+
+	// https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults
+	resF := func() func() exemplar.Reservoir[N] {
+		// Explicit bucket histogram aggregation with more than 1 bucket will
+		// use AlignedHistogramBucketExemplarReservoir.
+		a, ok := agg.(AggregationExplicitBucketHistogram)
+		if ok && len(a.Boundaries) > 0 {
+			cp := make([]float64, len(a.Boundaries))
+			copy(cp, a.Boundaries)
+			return func() exemplar.Reservoir[N] {
+				bounds := cp
+				return exemplar.Histogram[N](bounds)
+			}
+		}
+
+		var n int
+		if a, ok := agg.(AggregationBase2ExponentialHistogram); ok {
+			// Base2 Exponential Histogram Aggregation SHOULD use a
+			// SimpleFixedSizeExemplarReservoir with a reservoir equal to the
+			// smaller of the maximum number of buckets configured on the
+			// aggregation or twenty (e.g. min(20, max_buckets)).
+			n = int(a.MaxSize)
+			if n > 20 {
+				n = 20
+			}
+		} else {
+			// https://github.com/open-telemetry/opentelemetry-specification/blob/e94af89e3d0c01de30127a0f423e912f6cda7bed/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir
+			//   This Exemplar reservoir MAY take a configuration parameter for
+			//   the size of the reservoir. If no size configuration is
+			//   provided, the default size MAY be the number of possible
+			//   concurrent threads (e.g. number of CPUs) to help reduce
+			//   contention. Otherwise, a default size of 1 SHOULD be used.
+			n = runtime.NumCPU()
+			if n < 1 {
+				// Should never be the case, but be defensive.
+				n = 1
+			}
+		}
+
+		return func() exemplar.Reservoir[N] {
+			return exemplar.FixedSize[N](n)
+		}
+	}
+
+	// https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar
+	const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER"
+
+	switch os.Getenv(filterEnvKey) {
+	case "always_on":
+		return resF()
+	case "always_off":
+		return exemplar.Drop[N]
+	case "trace_based":
+		fallthrough
+	default:
+		newR := resF()
+		return func() exemplar.Reservoir[N] {
+			return exemplar.SampledFilter(newR())
+		}
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go
new file mode 100644
index 0000000000000000000000000000000000000000..da8941b378d30d03b339aec2f0acbecfde30f6c5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go
@@ -0,0 +1,88 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// ErrExporterShutdown is returned if Export or Shutdown is called after an
+// Exporter has been shut down.
+var ErrExporterShutdown = fmt.Errorf("exporter is shutdown")
+
+// Exporter handles the delivery of metric data to external receivers. This is
+// the final component in the metric push pipeline.
+type Exporter interface {
+	// Temporality returns the Temporality to use for an instrument kind.
+	//
+	// This method needs to be concurrent safe with itself and all the other
+	// Exporter methods.
+	Temporality(InstrumentKind) metricdata.Temporality
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Aggregation returns the Aggregation to use for an instrument kind.
+	//
+	// This method needs to be concurrent safe with itself and all the other
+	// Exporter methods.
+	Aggregation(InstrumentKind) Aggregation
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Export serializes and transmits metric data to a receiver.
+	//
+	// This is called synchronously, there is no concurrency safety
+	// requirement. Because of this, it is critical that all timeouts and
+	// cancellations of the passed context be honored.
+	//
+	// All retry logic must be contained in this function. The SDK does not
+	// implement any retry logic. All errors returned by this function are
+	// considered unrecoverable and will be reported to a configured error
+	// Handler.
+	//
+	// The passed ResourceMetrics may be reused when the call completes. If an
+	// exporter needs to hold this data after it returns, it needs to make a
+	// copy.
+	Export(context.Context, *metricdata.ResourceMetrics) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// ForceFlush flushes any metric data held by an exporter.
+	//
+	// The deadline or cancellation of the passed context must be honored. An
+	// appropriate error should be returned in these situations.
+	//
+	// This method needs to be concurrent safe.
+	ForceFlush(context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Shutdown flushes all metric data held by an exporter and releases any
+	// held computational resources.
+	//
+	// The deadline or cancellation of the passed context must be honored. An
+	// appropriate error should be returned in these situations.
+	//
+	// After Shutdown is called, calls to Export will perform no operation and
+	// instead will return an error indicating the shutdown state.
+	//
+	// This method needs to be concurrent safe.
+	Shutdown(context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
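+
+// A minimal conforming implementation might look like the following sketch
+// (illustrative only, not part of the SDK; it assumes this package's
+// DefaultAggregationSelector helper and discards all data):
+//
+//	type discardExporter struct{}
+//
+//	func (discardExporter) Temporality(InstrumentKind) metricdata.Temporality {
+//		return metricdata.CumulativeTemporality
+//	}
+//	func (discardExporter) Aggregation(k InstrumentKind) Aggregation {
+//		return DefaultAggregationSelector(k)
+//	}
+//	func (discardExporter) Export(context.Context, *metricdata.ResourceMetrics) error { return nil }
+//	func (discardExporter) ForceFlush(context.Context) error                          { return nil }
+//	func (discardExporter) Shutdown(context.Context) error                            { return nil }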
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
new file mode 100644
index 0000000000000000000000000000000000000000..a4cfcbb95f13583ae7bdc0aa305c2138238763dc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
@@ -0,0 +1,353 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:generate stringer -type=InstrumentKind -trimprefix=InstrumentKind
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/embedded"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+)
+
+var (
+	zeroInstrumentKind InstrumentKind
+	zeroScope          instrumentation.Scope
+)
+
+// InstrumentKind is the identifier of a group of instruments that all
+// perform the same function.
+type InstrumentKind uint8
+
+const (
+	// instrumentKindUndefined is an undefined instrument kind, it should not
+	// be used by any initialized type.
+	instrumentKindUndefined InstrumentKind = iota // nolint:deadcode,varcheck,unused
+	// InstrumentKindCounter identifies a group of instruments that record
+	// increasing values synchronously with the code path they are measuring.
+	InstrumentKindCounter
+	// InstrumentKindUpDownCounter identifies a group of instruments that
+	// record increasing and decreasing values synchronously with the code path
+	// they are measuring.
+	InstrumentKindUpDownCounter
+	// InstrumentKindHistogram identifies a group of instruments that record a
+	// distribution of values synchronously with the code path they are
+	// measuring.
+	InstrumentKindHistogram
+	// InstrumentKindObservableCounter identifies a group of instruments that
+	// record increasing values in an asynchronous callback.
+	InstrumentKindObservableCounter
+	// InstrumentKindObservableUpDownCounter identifies a group of instruments
+	// that record increasing and decreasing values in an asynchronous
+	// callback.
+	InstrumentKindObservableUpDownCounter
+	// InstrumentKindObservableGauge identifies a group of instruments that
+	// record current values in an asynchronous callback.
+	InstrumentKindObservableGauge
+)
+
+type nonComparable [0]func() // nolint: unused  // This is indeed used.
+
+// Instrument describes properties an instrument is created with.
+type Instrument struct {
+	// Name is the human-readable identifier of the instrument.
+	Name string
+	// Description describes the purpose of the instrument.
+	Description string
+	// Kind defines the functional group of the instrument.
+	Kind InstrumentKind
+	// Unit is the unit of measurement recorded by the instrument.
+	Unit string
+	// Scope identifies the instrumentation that created the instrument.
+	Scope instrumentation.Scope
+
+	// Ensure forward compatibility if non-comparable fields need to be added.
+	nonComparable // nolint: unused
+}
+
+// empty returns whether all fields of i are their zero-value.
+func (i Instrument) empty() bool {
+	return i.Name == "" &&
+		i.Description == "" &&
+		i.Kind == zeroInstrumentKind &&
+		i.Unit == "" &&
+		i.Scope == zeroScope
+}
+
+// matches returns whether all the non-zero-value fields of i match the
+// corresponding fields of other. If i is empty, it matches any other and
+// true is always returned.
+func (i Instrument) matches(other Instrument) bool {
+	return i.matchesName(other) &&
+		i.matchesDescription(other) &&
+		i.matchesKind(other) &&
+		i.matchesUnit(other) &&
+		i.matchesScope(other)
+}
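+
+// For example (illustrative): Instrument{Name: "latency"} matches any
+// instrument named "latency" regardless of its kind, unit, or scope, and
+// the zero-value Instrument{} matches every instrument.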
+
+// matchesName returns true if the Name of i is "" or it equals the Name of
+// other, otherwise false.
+func (i Instrument) matchesName(other Instrument) bool {
+	return i.Name == "" || i.Name == other.Name
+}
+
+// matchesDescription returns true if the Description of i is "" or it equals
+// the Description of other, otherwise false.
+func (i Instrument) matchesDescription(other Instrument) bool {
+	return i.Description == "" || i.Description == other.Description
+}
+
+// matchesKind returns true if the Kind of i is its zero-value or it equals the
+// Kind of other, otherwise false.
+func (i Instrument) matchesKind(other Instrument) bool {
+	return i.Kind == zeroInstrumentKind || i.Kind == other.Kind
+}
+
+// matchesUnit returns true if the Unit of i is its zero-value or it equals the
+// Unit of other, otherwise false.
+func (i Instrument) matchesUnit(other Instrument) bool {
+	return i.Unit == "" || i.Unit == other.Unit
+}
+
+// matchesScope returns true if the Scope of i is its zero-value or it equals
+// the Scope of other, otherwise false.
+func (i Instrument) matchesScope(other Instrument) bool {
+	return (i.Scope.Name == "" || i.Scope.Name == other.Scope.Name) &&
+		(i.Scope.Version == "" || i.Scope.Version == other.Scope.Version) &&
+		(i.Scope.SchemaURL == "" || i.Scope.SchemaURL == other.Scope.SchemaURL)
+}
+
+// Stream describes the stream of data an instrument produces.
+type Stream struct {
+	// Name is the human-readable identifier of the stream.
+	Name string
+	// Description describes the purpose of the data.
+	Description string
+	// Unit is the unit of measurement recorded.
+	Unit string
+	// Aggregation the stream uses for an instrument.
+	Aggregation Aggregation
+	// AttributeFilter is an attribute Filter applied to the attributes
+	// recorded for an instrument's measurement. If the filter returns false
+	// the attribute will not be recorded, otherwise, if it returns true, it
+	// will record the attribute.
+	//
+	// Use NewAllowKeysFilter from "go.opentelemetry.io/otel/attribute" to
+	// provide an allow-list of attribute keys here.
+	AttributeFilter attribute.Filter
+}
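+
+// An illustrative sketch (using this package's NewView helper; the
+// instrument and attribute names are hypothetical): rename an instrument's
+// stream and allow-list a single attribute key.
+//
+//	view := NewView(
+//		Instrument{Name: "latency"},
+//		Stream{
+//			Name:            "request.latency",
+//			AttributeFilter: attribute.NewAllowKeysFilter("http.route"),
+//		},
+//	)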
+
+// instID is the set of identifying properties of an instrument.
+type instID struct {
+	// Name is the name of the stream.
+	Name string
+	// Description is the description of the stream.
+	Description string
+	// Kind defines the functional group of the instrument.
+	Kind InstrumentKind
+	// Unit is the unit of the stream.
+	Unit string
+	// Number is the number type of the stream.
+	Number string
+}
+
+// normalize returns a normalized copy of the instID i.
+//
+// Instrument names are considered case-insensitive. Standardize the instrument
+// name to always be lowercase for the returned instID so it can be compared
+// without the name casing affecting the comparison.
+func (i instID) normalize() instID {
+	i.Name = strings.ToLower(i.Name)
+	return i
+}
+
+type int64Inst struct {
+	measures []aggregate.Measure[int64]
+
+	embedded.Int64Counter
+	embedded.Int64UpDownCounter
+	embedded.Int64Histogram
+}
+
+var (
+	_ metric.Int64Counter       = (*int64Inst)(nil)
+	_ metric.Int64UpDownCounter = (*int64Inst)(nil)
+	_ metric.Int64Histogram     = (*int64Inst)(nil)
+)
+
+func (i *int64Inst) Add(ctx context.Context, val int64, opts ...metric.AddOption) {
+	c := metric.NewAddConfig(opts)
+	i.aggregate(ctx, val, c.Attributes())
+}
+
+func (i *int64Inst) Record(ctx context.Context, val int64, opts ...metric.RecordOption) {
+	c := metric.NewRecordConfig(opts)
+	i.aggregate(ctx, val, c.Attributes())
+}
+
+func (i *int64Inst) aggregate(ctx context.Context, val int64, s attribute.Set) { // nolint:revive  // okay to shadow pkg with method.
+	for _, in := range i.measures {
+		in(ctx, val, s)
+	}
+}
+
+type float64Inst struct {
+	measures []aggregate.Measure[float64]
+
+	embedded.Float64Counter
+	embedded.Float64UpDownCounter
+	embedded.Float64Histogram
+}
+
+var (
+	_ metric.Float64Counter       = (*float64Inst)(nil)
+	_ metric.Float64UpDownCounter = (*float64Inst)(nil)
+	_ metric.Float64Histogram     = (*float64Inst)(nil)
+)
+
+func (i *float64Inst) Add(ctx context.Context, val float64, opts ...metric.AddOption) {
+	c := metric.NewAddConfig(opts)
+	i.aggregate(ctx, val, c.Attributes())
+}
+
+func (i *float64Inst) Record(ctx context.Context, val float64, opts ...metric.RecordOption) {
+	c := metric.NewRecordConfig(opts)
+	i.aggregate(ctx, val, c.Attributes())
+}
+
+func (i *float64Inst) aggregate(ctx context.Context, val float64, s attribute.Set) {
+	for _, in := range i.measures {
+		in(ctx, val, s)
+	}
+}
+
+// observablID is a comparable unique identifier of an observable.
+type observablID[N int64 | float64] struct {
+	name        string
+	description string
+	kind        InstrumentKind
+	unit        string
+	scope       instrumentation.Scope
+}
+
+type float64Observable struct {
+	metric.Float64Observable
+	*observable[float64]
+
+	embedded.Float64ObservableCounter
+	embedded.Float64ObservableUpDownCounter
+	embedded.Float64ObservableGauge
+}
+
+var (
+	_ metric.Float64ObservableCounter       = float64Observable{}
+	_ metric.Float64ObservableUpDownCounter = float64Observable{}
+	_ metric.Float64ObservableGauge         = float64Observable{}
+)
+
+func newFloat64Observable(m *meter, kind InstrumentKind, name, desc, u string) float64Observable {
+	return float64Observable{
+		observable: newObservable[float64](m, kind, name, desc, u),
+	}
+}
+
+type int64Observable struct {
+	metric.Int64Observable
+	*observable[int64]
+
+	embedded.Int64ObservableCounter
+	embedded.Int64ObservableUpDownCounter
+	embedded.Int64ObservableGauge
+}
+
+var (
+	_ metric.Int64ObservableCounter       = int64Observable{}
+	_ metric.Int64ObservableUpDownCounter = int64Observable{}
+	_ metric.Int64ObservableGauge         = int64Observable{}
+)
+
+func newInt64Observable(m *meter, kind InstrumentKind, name, desc, u string) int64Observable {
+	return int64Observable{
+		observable: newObservable[int64](m, kind, name, desc, u),
+	}
+}
+
+type observable[N int64 | float64] struct {
+	metric.Observable
+	observablID[N]
+
+	meter           *meter
+	measures        measures[N]
+	dropAggregation bool
+}
+
+func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc, u string) *observable[N] {
+	return &observable[N]{
+		observablID: observablID[N]{
+			name:        name,
+			description: desc,
+			kind:        kind,
+			unit:        u,
+			scope:       m.scope,
+		},
+		meter: m,
+	}
+}
+
+// observe records the val for the set of attrs.
+func (o *observable[N]) observe(val N, s attribute.Set) {
+	o.measures.observe(val, s)
+}
+
+func (o *observable[N]) appendMeasures(meas []aggregate.Measure[N]) {
+	o.measures = append(o.measures, meas...)
+}
+
+type measures[N int64 | float64] []aggregate.Measure[N]
+
+// observe records the val for the set of attrs.
+func (m measures[N]) observe(val N, s attribute.Set) {
+	for _, in := range m {
+		in(context.Background(), val, s)
+	}
+}
+
+var errEmptyAgg = errors.New("no aggregators for observable instrument")
+
+// registerable returns an error if the observable o should not be registered,
+// and nil if it should. An errEmptyAgg error is returned if o is effectively a
+// no-op because it does not have any aggregators. Also, an error is returned
+// if o was created by a Meter other than m.
+func (o *observable[N]) registerable(m *meter) error {
+	if len(o.measures) == 0 {
+		return errEmptyAgg
+	}
+	if m != o.meter {
+		return fmt.Errorf(
+			"invalid registration: observable %q from Meter %q, registered with Meter %q",
+			o.name,
+			o.scope.Name,
+			m.scope.Name,
+		)
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go
new file mode 100644
index 0000000000000000000000000000000000000000..d5f9e982c2b902fdb0f47c4c5cc829ad9ff55b92
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go
@@ -0,0 +1,29 @@
+// Code generated by "stringer -type=InstrumentKind -trimprefix=InstrumentKind"; DO NOT EDIT.
+
+package metric
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[instrumentKindUndefined-0]
+	_ = x[InstrumentKindCounter-1]
+	_ = x[InstrumentKindUpDownCounter-2]
+	_ = x[InstrumentKindHistogram-3]
+	_ = x[InstrumentKindObservableCounter-4]
+	_ = x[InstrumentKindObservableUpDownCounter-5]
+	_ = x[InstrumentKindObservableGauge-6]
+}
+
+const _InstrumentKind_name = "instrumentKindUndefinedCounterUpDownCounterHistogramObservableCounterObservableUpDownCounterObservableGauge"
+
+var _InstrumentKind_index = [...]uint8{0, 23, 30, 43, 52, 69, 92, 107}
+
+func (i InstrumentKind) String() string {
+	if i >= InstrumentKind(len(_InstrumentKind_index)-1) {
+		return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]]
+}
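+
+// For example (illustrative): InstrumentKindCounter.String() returns
+// "Counter", while an out-of-range value such as InstrumentKind(42) returns
+// "InstrumentKind(42)".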
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
new file mode 100644
index 0000000000000000000000000000000000000000..4060a2f76d3c54ed52ae760a1cdb5824bb39e5c9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
@@ -0,0 +1,160 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+	"context"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// now is used to return the current local time while allowing tests to
+// override the default time.Now function.
+var now = time.Now
+
+// Measure receives measurements to be aggregated.
+type Measure[N int64 | float64] func(context.Context, N, attribute.Set)
+
+// ComputeAggregation stores the aggregate of measurements into dest and
+// returns the number of aggregate data-points output.
+type ComputeAggregation func(dest *metricdata.Aggregation) int
+
+// Builder builds an aggregate function.
+type Builder[N int64 | float64] struct {
+	// Temporality is the temporality used for the returned aggregate function.
+	//
+	// If this is not provided, a default of cumulative will be used (except for
+	// the last-value aggregate function where delta is the only appropriate
+	// temporality).
+	Temporality metricdata.Temporality
+	// Filter is the attribute filter the aggregate function will use on the
+	// input of measurements.
+	Filter attribute.Filter
+	// ReservoirFunc is the factory function used by aggregate functions to
+	// create new exemplar reservoirs for a new seen attribute set.
+	//
+	// If this is not provided, a default factory function that returns an
+	// exemplar.Drop reservoir will be used.
+	ReservoirFunc func() exemplar.Reservoir[N]
+	// AggregationLimit is the cardinality limit of measurement attributes. Once
+	// the limit has been reached, any measurement made with a new attribute set
+	// will be aggregated into a single aggregate for the
+	// "otel.metric.overflow" attribute.
+	//
+	// If AggregationLimit is less than or equal to zero, no aggregation limit
+	// is imposed (i.e. unlimited attribute sets).
+	AggregationLimit int
+}
+
+func (b Builder[N]) resFunc() func() exemplar.Reservoir[N] {
+	if b.ReservoirFunc != nil {
+		return b.ReservoirFunc
+	}
+
+	return exemplar.Drop[N]
+}
+
+type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue)
+
+func (b Builder[N]) filter(f fltrMeasure[N]) Measure[N] {
+	if b.Filter != nil {
+		fltr := b.Filter // Copy to make it immutable after assignment.
+		return func(ctx context.Context, n N, a attribute.Set) {
+			fAttr, dropped := a.Filter(fltr)
+			f(ctx, n, fAttr, dropped)
+		}
+	}
+	return func(ctx context.Context, n N, a attribute.Set) {
+		f(ctx, n, a, nil)
+	}
+}
+
+// LastValue returns a last-value aggregate function input and output.
+//
+// The Builder.Temporality is ignored and delta is always used.
+func (b Builder[N]) LastValue() (Measure[N], ComputeAggregation) {
+	// Delta temporality is the only temporality that makes semantic sense for
+	// a last-value aggregate.
+	lv := newLastValue[N](b.AggregationLimit, b.resFunc())
+
+	return b.filter(lv.measure), func(dest *metricdata.Aggregation) int {
+		// Ignore if dest is not a metricdata.Gauge. The chance for memory
+		// reuse of the DataPoints is missed (better luck next time).
+		gData, _ := (*dest).(metricdata.Gauge[N])
+		lv.computeAggregation(&gData.DataPoints)
+		*dest = gData
+
+		return len(gData.DataPoints)
+	}
+}
+
+// PrecomputedSum returns a sum aggregate function input and output. The
+// arguments passed to the input are expected to be the precomputed sum values.
+func (b Builder[N]) PrecomputedSum(monotonic bool) (Measure[N], ComputeAggregation) {
+	s := newPrecomputedSum[N](monotonic, b.AggregationLimit, b.resFunc())
+	switch b.Temporality {
+	case metricdata.DeltaTemporality:
+		return b.filter(s.measure), s.delta
+	default:
+		return b.filter(s.measure), s.cumulative
+	}
+}
+
+// Sum returns a sum aggregate function input and output.
+func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) {
+	s := newSum[N](monotonic, b.AggregationLimit, b.resFunc())
+	switch b.Temporality {
+	case metricdata.DeltaTemporality:
+		return b.filter(s.measure), s.delta
+	default:
+		return b.filter(s.measure), s.cumulative
+	}
+}
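+
+// An illustrative sketch (internal API; all names used exist in this
+// package): build a delta sum aggregate and push one measurement through it.
+//
+//	b := Builder[int64]{Temporality: metricdata.DeltaTemporality}
+//	meas, comp := b.Sum(true) // monotonic
+//	meas(context.Background(), 5, *attribute.EmptySet())
+//	var agg metricdata.Aggregation
+//	n := comp(&agg) // n == 1; agg now holds a metricdata.Sum[int64].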
+
+// ExplicitBucketHistogram returns a histogram aggregate function input and
+// output.
+func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
+	h := newHistogram[N](boundaries, noMinMax, noSum, b.AggregationLimit, b.resFunc())
+	switch b.Temporality {
+	case metricdata.DeltaTemporality:
+		return b.filter(h.measure), h.delta
+	default:
+		return b.filter(h.measure), h.cumulative
+	}
+}
+
+// ExponentialBucketHistogram returns a histogram aggregate function input and
+// output.
+func (b Builder[N]) ExponentialBucketHistogram(maxSize, maxScale int32, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
+	h := newExponentialHistogram[N](maxSize, maxScale, noMinMax, noSum, b.AggregationLimit, b.resFunc())
+	switch b.Temporality {
+	case metricdata.DeltaTemporality:
+		return b.filter(h.measure), h.delta
+	default:
+		return b.filter(h.measure), h.cumulative
+	}
+}
+
+// reset ensures s has capacity and sets its length. If the capacity of s is
+// too small, a new slice is returned with the specified capacity and length.
+func reset[T any](s []T, length, capacity int) []T {
+	if cap(s) < capacity {
+		return make([]T, length, capacity)
+	}
+	return s[:length]
+}
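+
+// For example (illustrative): reset[int](nil, 3, 3) allocates a fresh slice
+// of length 3, while reset(make([]int, 0, 8), 3, 3) reuses the existing
+// backing array and reslices it to length 3.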
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..e83a2693faf0a16ffc36153d327b23c6f08d656e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go
@@ -0,0 +1,18 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package aggregate provides aggregate types used to compute aggregations and
+// cycle the state of metric measurements made by the SDK. These types and
+// functionality are meant only for internal SDK use.
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
new file mode 100644
index 0000000000000000000000000000000000000000..660b86071aeeae7967943c3501f659c25d78b171
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
@@ -0,0 +1,449 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+	"context"
+	"errors"
+	"math"
+	"sync"
+	"time"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+const (
+	expoMaxScale = 20
+	expoMinScale = -10
+
+	smallestNonZeroNormalFloat64 = 0x1p-1022
+
+	// These redefine the math package constants with a type, so the compiler
+	// won't coerce them into an int on 32-bit platforms.
+	maxInt64 int64 = math.MaxInt64
+	minInt64 int64 = math.MinInt64
+)
+
+// expoHistogramDataPoint is a single data point in an exponential histogram.
+type expoHistogramDataPoint[N int64 | float64] struct {
+	res exemplar.Reservoir[N]
+
+	count uint64
+	min   N
+	max   N
+	sum   N
+
+	maxSize  int
+	noMinMax bool
+	noSum    bool
+
+	scale int
+
+	posBuckets expoBuckets
+	negBuckets expoBuckets
+	zeroCount  uint64
+}
+
+func newExpoHistogramDataPoint[N int64 | float64](maxSize, maxScale int, noMinMax, noSum bool) *expoHistogramDataPoint[N] {
+	f := math.MaxFloat64
+	max := N(f) // if N is int64, max will overflow to -9223372036854775808
+	min := N(-f)
+	if N(maxInt64) > N(f) {
+		max = N(maxInt64)
+		min = N(minInt64)
+	}
+	return &expoHistogramDataPoint[N]{
+		min:      max,
+		max:      min,
+		maxSize:  maxSize,
+		noMinMax: noMinMax,
+		noSum:    noSum,
+		scale:    maxScale,
+	}
+}
+
+// record adds a new measurement to the histogram. It will rescale the buckets if needed.
+func (p *expoHistogramDataPoint[N]) record(v N) {
+	p.count++
+
+	if !p.noMinMax {
+		if v < p.min {
+			p.min = v
+		}
+		if v > p.max {
+			p.max = v
+		}
+	}
+	if !p.noSum {
+		p.sum += v
+	}
+
+	absV := math.Abs(float64(v))
+
+	if absV == 0.0 {
+		p.zeroCount++
+		return
+	}
+
+	bin := p.getBin(absV)
+
+	bucket := &p.posBuckets
+	if v < 0 {
+		bucket = &p.negBuckets
+	}
+
+	// If the new bin would make the counts larger than maxSize, we need to
+	// downscale the current measurements.
+	if scaleDelta := p.scaleChange(bin, bucket.startBin, len(bucket.counts)); scaleDelta > 0 {
+		if p.scale-scaleDelta < expoMinScale {
+			// With a scale of -10 there are only two buckets for the whole range
+			// of float64 values, so this can only happen if the max size is 1.
+			otel.Handle(errors.New("exponential histogram scale underflow"))
+			return
+		}
+		// Downscale
+		p.scale -= scaleDelta
+		p.posBuckets.downscale(scaleDelta)
+		p.negBuckets.downscale(scaleDelta)
+
+		bin = p.getBin(absV)
+	}
+
+	bucket.record(bin)
+}
+
+// getBin returns the bin v should be recorded into.
+func (p *expoHistogramDataPoint[N]) getBin(v float64) int {
+	frac, exp := math.Frexp(v)
+	if p.scale <= 0 {
+		// Because of Frexp's choice of fraction, exp is always 1 power of two
+		// higher than we want.
+		correction := 1
+		if frac == .5 {
+			// If v is an exact power of two the frac will be .5 and the exp
+			// will be one higher than we want.
+			correction = 2
+		}
+		return (exp - correction) >> (-p.scale)
+	}
+	return exp<<p.scale + int(math.Log(frac)*scaleFactors[p.scale]) - 1
+}
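+
+// For example (illustrative, scale 0): getBin(4) yields bin 1. math.Frexp(4)
+// returns frac=0.5, exp=3; the exact-power-of-two correction of 2 applies,
+// and (3-2)>>0 == 1, with bin 1 covering the interval (2, 4].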
+
+// scaleFactors are constants used in calculating the logarithm index. They are
+// equivalent to 2^index/log(2).
+var scaleFactors = [21]float64{
+	math.Ldexp(math.Log2E, 0),
+	math.Ldexp(math.Log2E, 1),
+	math.Ldexp(math.Log2E, 2),
+	math.Ldexp(math.Log2E, 3),
+	math.Ldexp(math.Log2E, 4),
+	math.Ldexp(math.Log2E, 5),
+	math.Ldexp(math.Log2E, 6),
+	math.Ldexp(math.Log2E, 7),
+	math.Ldexp(math.Log2E, 8),
+	math.Ldexp(math.Log2E, 9),
+	math.Ldexp(math.Log2E, 10),
+	math.Ldexp(math.Log2E, 11),
+	math.Ldexp(math.Log2E, 12),
+	math.Ldexp(math.Log2E, 13),
+	math.Ldexp(math.Log2E, 14),
+	math.Ldexp(math.Log2E, 15),
+	math.Ldexp(math.Log2E, 16),
+	math.Ldexp(math.Log2E, 17),
+	math.Ldexp(math.Log2E, 18),
+	math.Ldexp(math.Log2E, 19),
+	math.Ldexp(math.Log2E, 20),
+}
+
+// scaleChange returns the magnitude of the scale change needed to fit bin in
+// the bucket. If no scale change is needed, 0 is returned.
+func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin, length int) int {
+	if length == 0 {
+		// No need to rescale if there are no buckets.
+		return 0
+	}
+
+	low := startBin
+	high := bin
+	if startBin >= bin {
+		low = bin
+		high = startBin + length - 1
+	}
+
+	count := 0
+	for high-low >= p.maxSize {
+		low = low >> 1
+		high = high >> 1
+		count++
+		if count > expoMaxScale-expoMinScale {
+			return count
+		}
+	}
+	return count
+}
+
+// expoBuckets is a set of buckets in an exponential histogram.
+type expoBuckets struct {
+	startBin int
+	counts   []uint64
+}
+
+// record increments the count for the given bin, and expands the buckets if needed.
+// Size changes must be done before calling this function.
+func (b *expoBuckets) record(bin int) {
+	if len(b.counts) == 0 {
+		b.counts = []uint64{1}
+		b.startBin = bin
+		return
+	}
+
+	endBin := b.startBin + len(b.counts) - 1
+
+	// if the new bin is inside the current range
+	if bin >= b.startBin && bin <= endBin {
+		b.counts[bin-b.startBin]++
+		return
+	}
+	// if the new bin is before the current start, add spaces to the counts
+	if bin < b.startBin {
+		origLen := len(b.counts)
+		newLength := endBin - bin + 1
+		shift := b.startBin - bin
+
+		if newLength > cap(b.counts) {
+			b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...)
+		}
+
+		copy(b.counts[shift:origLen+shift], b.counts[:])
+		b.counts = b.counts[:newLength]
+		for i := 1; i < shift; i++ {
+			b.counts[i] = 0
+		}
+		b.startBin = bin
+		b.counts[0] = 1
+		return
+	}
+	// if the new bin is after the end, add spaces to the end
+	if bin > endBin {
+		if bin-b.startBin < cap(b.counts) {
+			b.counts = b.counts[:bin-b.startBin+1]
+			for i := endBin + 1 - b.startBin; i < len(b.counts); i++ {
+				b.counts[i] = 0
+			}
+			b.counts[bin-b.startBin] = 1
+			return
+		}
+
+		end := make([]uint64, bin-b.startBin-len(b.counts)+1)
+		b.counts = append(b.counts, end...)
+		b.counts[bin-b.startBin] = 1
+	}
+}
+
+// downscale shrinks the buckets by a factor of 2^delta. It will sum counts
+// into the correct lower-resolution buckets.
+func (b *expoBuckets) downscale(delta int) {
+	// Example
+	// delta = 2
+	// Original offset: -6
+	// Counts: [ 3,  1,  2,  3,  4,  5, 6, 7, 8, 9, 10]
+	// bins:    -6  -5, -4, -3, -2, -1, 0, 1, 2, 3, 4
+	// new bins:-2, -2, -1, -1, -1, -1, 0, 0, 0, 0, 1
+	// new Offset: -2
+	// new Counts: [4, 14, 30, 10]
+
+	if len(b.counts) <= 1 || delta < 1 {
+		b.startBin = b.startBin >> delta
+		return
+	}
+
+	steps := 1 << delta
+	offset := b.startBin % steps
+	offset = (offset + steps) % steps // to make offset positive
+	for i := 1; i < len(b.counts); i++ {
+		idx := i + offset
+		if idx%steps == 0 {
+			b.counts[idx/steps] = b.counts[i]
+			continue
+		}
+		b.counts[idx/steps] += b.counts[i]
+	}
+
+	lastIdx := (len(b.counts) - 1 + offset) / steps
+	b.counts = b.counts[:lastIdx+1]
+	b.startBin = b.startBin >> delta
+}
+
+// newExponentialHistogram returns an Aggregator that summarizes a set of
+// measurements as an exponential histogram. Each histogram is scoped by attributes
+// and the aggregation cycle the measurements were made in.
+func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.Reservoir[N]) *expoHistogram[N] {
+	return &expoHistogram[N]{
+		noSum:    noSum,
+		noMinMax: noMinMax,
+		maxSize:  int(maxSize),
+		maxScale: int(maxScale),
+
+		newRes: r,
+		limit:  newLimiter[*expoHistogramDataPoint[N]](limit),
+		values: make(map[attribute.Set]*expoHistogramDataPoint[N]),
+
+		start: now(),
+	}
+}
+
+// expoHistogram summarizes a set of measurements as a histogram with exponentially
+// defined buckets.
+type expoHistogram[N int64 | float64] struct {
+	noSum    bool
+	noMinMax bool
+	maxSize  int
+	maxScale int
+
+	newRes   func() exemplar.Reservoir[N]
+	limit    limiter[*expoHistogramDataPoint[N]]
+	values   map[attribute.Set]*expoHistogramDataPoint[N]
+	valuesMu sync.Mutex
+
+	start time.Time
+}
+
+func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+	// Ignore NaN and infinity.
+	if math.IsInf(float64(value), 0) || math.IsNaN(float64(value)) {
+		return
+	}
+
+	t := now()
+
+	e.valuesMu.Lock()
+	defer e.valuesMu.Unlock()
+
+	attr := e.limit.Attributes(fltrAttr, e.values)
+	v, ok := e.values[attr]
+	if !ok {
+		v = newExpoHistogramDataPoint[N](e.maxSize, e.maxScale, e.noMinMax, e.noSum)
+		v.res = e.newRes()
+
+		e.values[attr] = v
+	}
+	v.record(value)
+	v.res.Offer(ctx, t, value, droppedAttr)
+}
+
+func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int {
+	t := now()
+
+	// If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed.
+	// In that case, use the zero-value h and hope for better alignment next cycle.
+	h, _ := (*dest).(metricdata.ExponentialHistogram[N])
+	h.Temporality = metricdata.DeltaTemporality
+
+	e.valuesMu.Lock()
+	defer e.valuesMu.Unlock()
+
+	n := len(e.values)
+	hDPts := reset(h.DataPoints, n, n)
+
+	var i int
+	for a, b := range e.values {
+		hDPts[i].Attributes = a
+		hDPts[i].StartTime = e.start
+		hDPts[i].Time = t
+		hDPts[i].Count = b.count
+		hDPts[i].Scale = int32(b.scale)
+		hDPts[i].ZeroCount = b.zeroCount
+		hDPts[i].ZeroThreshold = 0.0
+
+		hDPts[i].PositiveBucket.Offset = int32(b.posBuckets.startBin)
+		hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(b.posBuckets.counts), len(b.posBuckets.counts))
+		copy(hDPts[i].PositiveBucket.Counts, b.posBuckets.counts)
+
+		hDPts[i].NegativeBucket.Offset = int32(b.negBuckets.startBin)
+		hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(b.negBuckets.counts), len(b.negBuckets.counts))
+		copy(hDPts[i].NegativeBucket.Counts, b.negBuckets.counts)
+
+		if !e.noSum {
+			hDPts[i].Sum = b.sum
+		}
+		if !e.noMinMax {
+			hDPts[i].Min = metricdata.NewExtrema(b.min)
+			hDPts[i].Max = metricdata.NewExtrema(b.max)
+		}
+
+		b.res.Collect(&hDPts[i].Exemplars)
+
+		delete(e.values, a)
+		i++
+	}
+	e.start = t
+	h.DataPoints = hDPts
+	*dest = h
+	return n
+}
+
+func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int {
+	t := now()
+
+	// If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed.
+	// In that case, use the zero-value h and hope for better alignment next cycle.
+	h, _ := (*dest).(metricdata.ExponentialHistogram[N])
+	h.Temporality = metricdata.CumulativeTemporality
+
+	e.valuesMu.Lock()
+	defer e.valuesMu.Unlock()
+
+	n := len(e.values)
+	hDPts := reset(h.DataPoints, n, n)
+
+	var i int
+	for a, b := range e.values {
+		hDPts[i].Attributes = a
+		hDPts[i].StartTime = e.start
+		hDPts[i].Time = t
+		hDPts[i].Count = b.count
+		hDPts[i].Scale = int32(b.scale)
+		hDPts[i].ZeroCount = b.zeroCount
+		hDPts[i].ZeroThreshold = 0.0
+
+		hDPts[i].PositiveBucket.Offset = int32(b.posBuckets.startBin)
+		hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(b.posBuckets.counts), len(b.posBuckets.counts))
+		copy(hDPts[i].PositiveBucket.Counts, b.posBuckets.counts)
+
+		hDPts[i].NegativeBucket.Offset = int32(b.negBuckets.startBin)
+		hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(b.negBuckets.counts), len(b.negBuckets.counts))
+		copy(hDPts[i].NegativeBucket.Counts, b.negBuckets.counts)
+
+		if !e.noSum {
+			hDPts[i].Sum = b.sum
+		}
+		if !e.noMinMax {
+			hDPts[i].Min = metricdata.NewExtrema(b.min)
+			hDPts[i].Max = metricdata.NewExtrema(b.max)
+		}
+
+		b.res.Collect(&hDPts[i].Exemplars)
+
+		i++
+		// TODO (#3006): This will use an unbounded amount of memory if there
+		// are unbounded number of attribute sets being aggregated. Attribute
+		// sets that become "stale" need to be forgotten so this will not
+		// overload the system.
+	}
+
+	h.DataPoints = hDPts
+	*dest = h
+	return n
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
new file mode 100644
index 0000000000000000000000000000000000000000..a9a4706bf000143d62d7dbab4df7d229fc32a303
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
@@ -0,0 +1,249 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+	"context"
+	"sort"
+	"sync"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+type buckets[N int64 | float64] struct {
+	res exemplar.Reservoir[N]
+
+	counts   []uint64
+	count    uint64
+	total    N
+	min, max N
+}
+
+// newBuckets returns buckets with n bins.
+func newBuckets[N int64 | float64](n int) *buckets[N] {
+	return &buckets[N]{counts: make([]uint64, n)}
+}
+
+func (b *buckets[N]) sum(value N) { b.total += value }
+
+func (b *buckets[N]) bin(idx int, value N) {
+	b.counts[idx]++
+	b.count++
+	if value < b.min {
+		b.min = value
+	} else if value > b.max {
+		b.max = value
+	}
+}
+
+// histValues summarizes a set of measurements as a histogram with
+// explicitly defined buckets.
+type histValues[N int64 | float64] struct {
+	noSum  bool
+	bounds []float64
+
+	newRes   func() exemplar.Reservoir[N]
+	limit    limiter[*buckets[N]]
+	values   map[attribute.Set]*buckets[N]
+	valuesMu sync.Mutex
+}
+
+func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() exemplar.Reservoir[N]) *histValues[N] {
+	// Keeping all buckets correctly associated with the passed boundaries is
+	// ultimately this type's responsibility. Make a copy here so we can always
+	// guarantee this, or, in the case of failure, have complete control over
+	// the fix.
+	b := make([]float64, len(bounds))
+	copy(b, bounds)
+	sort.Float64s(b)
+	return &histValues[N]{
+		noSum:  noSum,
+		bounds: b,
+		newRes: r,
+		limit:  newLimiter[*buckets[N]](limit),
+		values: make(map[attribute.Set]*buckets[N]),
+	}
+}
+
+// measure records the measurement value, scoped by attr, and aggregates it
+// into a histogram.
+func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+	// This search will return an index in the range [0, len(s.bounds)], where
+	// it will return len(s.bounds) if value is greater than the last element
+	// of s.bounds. This aligns with the buckets in that the length of buckets
+	// is len(s.bounds)+1, with the last bucket representing:
+	// (s.bounds[len(s.bounds)-1], +∞).
+	idx := sort.SearchFloat64s(s.bounds, float64(value))
+
+	t := now()
+
+	s.valuesMu.Lock()
+	defer s.valuesMu.Unlock()
+
+	attr := s.limit.Attributes(fltrAttr, s.values)
+	b, ok := s.values[attr]
+	if !ok {
+		// N+1 buckets. For example:
+		//
+		//   bounds = [0, 5, 10]
+		//
+		// Then,
+		//
+		//   buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞)
+		b = newBuckets[N](len(s.bounds) + 1)
+		b.res = s.newRes()
+
+		// Ensure min and max are recorded values (not zero) for new buckets.
+		b.min, b.max = value, value
+		s.values[attr] = b
+	}
+	b.bin(idx, value)
+	if !s.noSum {
+		b.sum(value)
+	}
+	b.res.Offer(ctx, t, value, droppedAttr)
+}
+
+// newHistogram returns an Aggregator that summarizes a set of measurements as
+// a histogram.
+func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.Reservoir[N]) *histogram[N] {
+	return &histogram[N]{
+		histValues: newHistValues[N](boundaries, noSum, limit, r),
+		noMinMax:   noMinMax,
+		start:      now(),
+	}
+}
+
+// histogram summarizes a set of measurements as a histogram with explicitly
+// defined buckets.
+type histogram[N int64 | float64] struct {
+	*histValues[N]
+
+	noMinMax bool
+	start    time.Time
+}
+
+func (s *histogram[N]) delta(dest *metricdata.Aggregation) int {
+	t := now()
+
+	// If *dest is not a metricdata.Histogram, memory reuse is missed. In that
+	// case, use the zero-value h and hope for better alignment next cycle.
+	h, _ := (*dest).(metricdata.Histogram[N])
+	h.Temporality = metricdata.DeltaTemporality
+
+	s.valuesMu.Lock()
+	defer s.valuesMu.Unlock()
+
+	// Do not allow modification of our copy of bounds.
+	bounds := make([]float64, len(s.bounds))
+	copy(bounds, s.bounds)
+
+	n := len(s.values)
+	hDPts := reset(h.DataPoints, n, n)
+
+	var i int
+	for a, b := range s.values {
+		hDPts[i].Attributes = a
+		hDPts[i].StartTime = s.start
+		hDPts[i].Time = t
+		hDPts[i].Count = b.count
+		hDPts[i].Bounds = bounds
+		hDPts[i].BucketCounts = b.counts
+
+		if !s.noSum {
+			hDPts[i].Sum = b.total
+		}
+
+		if !s.noMinMax {
+			hDPts[i].Min = metricdata.NewExtrema(b.min)
+			hDPts[i].Max = metricdata.NewExtrema(b.max)
+		}
+
+		b.res.Collect(&hDPts[i].Exemplars)
+
+		// Unused attribute sets do not report.
+		delete(s.values, a)
+		i++
+	}
+	// The delta collection cycle resets.
+	s.start = t
+
+	h.DataPoints = hDPts
+	*dest = h
+
+	return n
+}
+
+func (s *histogram[N]) cumulative(dest *metricdata.Aggregation) int {
+	t := now()
+
+	// If *dest is not a metricdata.Histogram, memory reuse is missed. In that
+	// case, use the zero-value h and hope for better alignment next cycle.
+	h, _ := (*dest).(metricdata.Histogram[N])
+	h.Temporality = metricdata.CumulativeTemporality
+
+	s.valuesMu.Lock()
+	defer s.valuesMu.Unlock()
+
+	// Do not allow modification of our copy of bounds.
+	bounds := make([]float64, len(s.bounds))
+	copy(bounds, s.bounds)
+
+	n := len(s.values)
+	hDPts := reset(h.DataPoints, n, n)
+
+	var i int
+	for a, b := range s.values {
+		// The HistogramDataPoint field values returned need to be copies of
+		// the buckets value as we will keep updating them.
+		//
+		// TODO (#3047): Making copies for bounds and counts incurs a large
+		// memory allocation footprint. Alternatives should be explored.
+		counts := make([]uint64, len(b.counts))
+		copy(counts, b.counts)
+
+		hDPts[i].Attributes = a
+		hDPts[i].StartTime = s.start
+		hDPts[i].Time = t
+		hDPts[i].Count = b.count
+		hDPts[i].Bounds = bounds
+		hDPts[i].BucketCounts = counts
+
+		if !s.noSum {
+			hDPts[i].Sum = b.total
+		}
+
+		if !s.noMinMax {
+			hDPts[i].Min = metricdata.NewExtrema(b.min)
+			hDPts[i].Max = metricdata.NewExtrema(b.max)
+		}
+
+		b.res.Collect(&hDPts[i].Exemplars)
+
+		i++
+		// TODO (#3006): This will use an unbounded amount of memory if there
+		// are unbounded number of attribute sets being aggregated. Attribute
+		// sets that become "stale" need to be forgotten so this will not
+		// overload the system.
+	}
+
+	h.DataPoints = hDPts
+	*dest = h
+
+	return n
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
new file mode 100644
index 0000000000000000000000000000000000000000..5699e728f1fc508c72dfe9c72f3d514c9873fced
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
@@ -0,0 +1,89 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// datapoint is timestamped measurement data.
+type datapoint[N int64 | float64] struct {
+	timestamp time.Time
+	value     N
+	res       exemplar.Reservoir[N]
+}
+
+func newLastValue[N int64 | float64](limit int, r func() exemplar.Reservoir[N]) *lastValue[N] {
+	return &lastValue[N]{
+		newRes: r,
+		limit:  newLimiter[datapoint[N]](limit),
+		values: make(map[attribute.Set]datapoint[N]),
+	}
+}
+
+// lastValue summarizes a set of measurements as the last one made.
+type lastValue[N int64 | float64] struct {
+	sync.Mutex
+
+	newRes func() exemplar.Reservoir[N]
+	limit  limiter[datapoint[N]]
+	values map[attribute.Set]datapoint[N]
+}
+
+func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+	t := now()
+
+	s.Lock()
+	defer s.Unlock()
+
+	attr := s.limit.Attributes(fltrAttr, s.values)
+	d, ok := s.values[attr]
+	if !ok {
+		d.res = s.newRes()
+	}
+
+	d.timestamp = t
+	d.value = value
+	d.res.Offer(ctx, t, value, droppedAttr)
+
+	s.values[attr] = d
+}
+
+func (s *lastValue[N]) computeAggregation(dest *[]metricdata.DataPoint[N]) {
+	s.Lock()
+	defer s.Unlock()
+
+	n := len(s.values)
+	*dest = reset(*dest, n, n)
+
+	var i int
+	for a, v := range s.values {
+		(*dest)[i].Attributes = a
+		// The event time is the only meaningful timestamp; StartTime is
+		// ignored.
+		(*dest)[i].Time = v.timestamp
+		(*dest)[i].Value = v.value
+		v.res.Collect(&(*dest)[i].Exemplars)
+		// Do not report stale values.
+		delete(s.values, a)
+		i++
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go
new file mode 100644
index 0000000000000000000000000000000000000000..d3de8427200c0550b88019aee70f1cdafed42fe9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go
@@ -0,0 +1,53 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// overflowSet is the attribute set used to record a measurement when adding
+// another distinct attribute set to the aggregate would exceed the aggregate
+// limit.
+var overflowSet = attribute.NewSet(attribute.Bool("otel.metric.overflow", true))
+
+// limiter limits aggregate values.
+type limiter[V any] struct {
+	// aggLimit is the maximum number of metric streams that can be aggregated.
+	//
+	// Once the aggLimit has been met, any metric stream with attributes
+	// distinct from those already aggregated will instead be aggregated
+	// into an "overflow" metric stream. That stream will only contain the
+	// "otel.metric.overflow"=true attribute.
+	aggLimit int
+}
+
+// newLimiter returns a new limiter with the provided aggregation limit.
+func newLimiter[V any](aggregation int) limiter[V] {
+	return limiter[V]{aggLimit: aggregation}
+}
+
+// Attributes checks if adding a measurement for attrs will exceed the
+// aggregation cardinality limit for the existing measurements. If it will,
+// overflowSet is returned. Otherwise, if it will not exceed the limit, or the
+// limit is not set (limit <= 0), attrs is returned.
+func (l limiter[V]) Attributes(attrs attribute.Set, measurements map[attribute.Set]V) attribute.Set {
+	if l.aggLimit > 0 {
+		_, exists := measurements[attrs]
+		if !exists && len(measurements) >= l.aggLimit-1 {
+			return overflowSet
+		}
+	}
+
+	return attrs
+}
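+
+// For example (illustrative): with aggLimit = 3 the first two distinct
+// attribute sets are stored as-is; any further new set is folded into
+// overflowSet, capping the map at three streams total.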
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
new file mode 100644
index 0000000000000000000000000000000000000000..02de2483f3b6434ab30223d380fa6f7db80fb4d1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
@@ -0,0 +1,250 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+type sumValue[N int64 | float64] struct {
+	n   N
+	res exemplar.Reservoir[N]
+}
+
+// valueMap is the storage for sums.
+type valueMap[N int64 | float64] struct {
+	sync.Mutex
+	newRes func() exemplar.Reservoir[N]
+	limit  limiter[sumValue[N]]
+	values map[attribute.Set]sumValue[N]
+}
+
+func newValueMap[N int64 | float64](limit int, r func() exemplar.Reservoir[N]) *valueMap[N] {
+	return &valueMap[N]{
+		newRes: r,
+		limit:  newLimiter[sumValue[N]](limit),
+		values: make(map[attribute.Set]sumValue[N]),
+	}
+}
+
+func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+	t := now()
+
+	s.Lock()
+	defer s.Unlock()
+
+	attr := s.limit.Attributes(fltrAttr, s.values)
+	v, ok := s.values[attr]
+	if !ok {
+		v.res = s.newRes()
+	}
+
+	v.n += value
+	v.res.Offer(ctx, t, value, droppedAttr)
+
+	s.values[attr] = v
+}
+
+// newSum returns an aggregator that summarizes a set of measurements as their
+// arithmetic sum. Each sum is scoped by attributes and the aggregation cycle
+// the measurements were made in.
+func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.Reservoir[N]) *sum[N] {
+	return &sum[N]{
+		valueMap:  newValueMap[N](limit, r),
+		monotonic: monotonic,
+		start:     now(),
+	}
+}
+
+// sum summarizes a set of measurements made as their arithmetic sum.
+type sum[N int64 | float64] struct {
+	*valueMap[N]
+
+	monotonic bool
+	start     time.Time
+}
+
+func (s *sum[N]) delta(dest *metricdata.Aggregation) int {
+	t := now()
+
+	// If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
+	// use the zero-value sData and hope for better alignment next cycle.
+	sData, _ := (*dest).(metricdata.Sum[N])
+	sData.Temporality = metricdata.DeltaTemporality
+	sData.IsMonotonic = s.monotonic
+
+	s.Lock()
+	defer s.Unlock()
+
+	n := len(s.values)
+	dPts := reset(sData.DataPoints, n, n)
+
+	var i int
+	for attr, val := range s.values {
+		dPts[i].Attributes = attr
+		dPts[i].StartTime = s.start
+		dPts[i].Time = t
+		dPts[i].Value = val.n
+		val.res.Collect(&dPts[i].Exemplars)
+		// Do not report stale values.
+		delete(s.values, attr)
+		i++
+	}
+	// The delta collection cycle resets.
+	s.start = t
+
+	sData.DataPoints = dPts
+	*dest = sData
+
+	return n
+}
+
+func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int {
+	t := now()
+
+	// If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
+	// use the zero-value sData and hope for better alignment next cycle.
+	sData, _ := (*dest).(metricdata.Sum[N])
+	sData.Temporality = metricdata.CumulativeTemporality
+	sData.IsMonotonic = s.monotonic
+
+	s.Lock()
+	defer s.Unlock()
+
+	n := len(s.values)
+	dPts := reset(sData.DataPoints, n, n)
+
+	var i int
+	for attr, value := range s.values {
+		dPts[i].Attributes = attr
+		dPts[i].StartTime = s.start
+		dPts[i].Time = t
+		dPts[i].Value = value.n
+		value.res.Collect(&dPts[i].Exemplars)
+		// TODO (#3006): This will use an unbounded amount of memory if there
+		// are unbounded number of attribute sets being aggregated. Attribute
+		// sets that become "stale" need to be forgotten so this will not
+		// overload the system.
+		i++
+	}
+
+	sData.DataPoints = dPts
+	*dest = sData
+
+	return n
+}
+
+// newPrecomputedSum returns an aggregator that summarizes a set of
+// observations as their arithmetic sum. Each sum is scoped by attributes and
+// the aggregation cycle the measurements were made in.
+func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.Reservoir[N]) *precomputedSum[N] {
+	return &precomputedSum[N]{
+		valueMap:  newValueMap[N](limit, r),
+		monotonic: monotonic,
+		start:     now(),
+	}
+}
+
+// precomputedSum summarizes a set of observations as their arithmetic sum.
+type precomputedSum[N int64 | float64] struct {
+	*valueMap[N]
+
+	monotonic bool
+	start     time.Time
+
+	reported map[attribute.Set]N
+}
+
+func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int {
+	t := now()
+	newReported := make(map[attribute.Set]N)
+
+	// If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
+	// use the zero-value sData and hope for better alignment next cycle.
+	sData, _ := (*dest).(metricdata.Sum[N])
+	sData.Temporality = metricdata.DeltaTemporality
+	sData.IsMonotonic = s.monotonic
+
+	s.Lock()
+	defer s.Unlock()
+
+	n := len(s.values)
+	dPts := reset(sData.DataPoints, n, n)
+
+	var i int
+	for attr, value := range s.values {
+		delta := value.n - s.reported[attr]
+
+		dPts[i].Attributes = attr
+		dPts[i].StartTime = s.start
+		dPts[i].Time = t
+		dPts[i].Value = delta
+		value.res.Collect(&dPts[i].Exemplars)
+
+		newReported[attr] = value.n
+		// Unused attribute sets do not report.
+		delete(s.values, attr)
+		i++
+	}
+	// Unused attribute sets are forgotten.
+	s.reported = newReported
+	// The delta collection cycle resets.
+	s.start = t
+
+	sData.DataPoints = dPts
+	*dest = sData
+
+	return n
+}
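+
+// For example (illustrative): if an observable callback reports 10 and then
+// 15 for the same attribute set, the first delta collection emits 10 and
+// the second emits 5.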
+
+func (s *precomputedSum[N]) cumulative(dest *metricdata.Aggregation) int {
+	t := now()
+
+	// If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
+	// use the zero-value sData and hope for better alignment next cycle.
+	sData, _ := (*dest).(metricdata.Sum[N])
+	sData.Temporality = metricdata.CumulativeTemporality
+	sData.IsMonotonic = s.monotonic
+
+	s.Lock()
+	defer s.Unlock()
+
+	n := len(s.values)
+	dPts := reset(sData.DataPoints, n, n)
+
+	var i int
+	for attr, val := range s.values {
+		dPts[i].Attributes = attr
+		dPts[i].StartTime = s.start
+		dPts[i].Time = t
+		dPts[i].Value = val.n
+		val.res.Collect(&dPts[i].Exemplars)
+
+		// Unused attribute sets do not report.
+		delete(s.values, attr)
+		i++
+	}
+
+	sData.DataPoints = dPts
+	*dest = sData
+
+	return n
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..3caeb542c57ab21dde6ad2b5a66f87f145a2e8c2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package exemplar provides an implementation of the OpenTelemetry exemplar
+// reservoir to be used in metric collection pipelines.
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go
new file mode 100644
index 0000000000000000000000000000000000000000..39bf37b9e948bbdd1349470391e1a29ee5b8882c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go
@@ -0,0 +1,36 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+	"context"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// Drop returns a [Reservoir] that drops all measurements it is offered.
+func Drop[N int64 | float64]() Reservoir[N] { return &dropRes[N]{} }
+
+type dropRes[N int64 | float64] struct{}
+
+// Offer does nothing; all measurements offered are dropped.
+func (r *dropRes[N]) Offer(context.Context, time.Time, N, []attribute.KeyValue) {}
+
+// Collect resets dest. No exemplars will ever be returned.
+func (r *dropRes[N]) Collect(dest *[]metricdata.Exemplar[N]) {
+	*dest = (*dest)[:0]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go
new file mode 100644
index 0000000000000000000000000000000000000000..4f5946fb9660f986fa14ae0d5724c76f8c042249
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go
@@ -0,0 +1,40 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+	"context"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// SampledFilter returns a [Reservoir] wrapping r that will only offer measurements
+// to r if the passed context associated with the measurement contains a sampled
+// [go.opentelemetry.io/otel/trace.SpanContext].
+func SampledFilter[N int64 | float64](r Reservoir[N]) Reservoir[N] {
+	return filtered[N]{Reservoir: r}
+}
+
+type filtered[N int64 | float64] struct {
+	Reservoir[N]
+}
+
+func (f filtered[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute.KeyValue) {
+	if trace.SpanContextFromContext(ctx).IsSampled() {
+		f.Reservoir.Offer(ctx, t, n, a)
+	}
+}
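+
+// A usage sketch (illustrative, not part of the upstream API surface):
+// wrapping another Reservoir so only measurements made inside a sampled span
+// contribute exemplars. FixedSize is the fixed-size reservoir from this
+// package.
+//
+//	res := SampledFilter[int64](FixedSize[int64](4))
+//	// With no sampled span in the context, the offer is filtered out.
+//	res.Offer(context.Background(), time.Now(), 42, nil)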
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go
new file mode 100644
index 0000000000000000000000000000000000000000..6f4fe5524b16ded28efa767ac2d9f7dbbadc22ca
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go
@@ -0,0 +1,47 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+	"context"
+	"sort"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+// Histogram returns a [Reservoir] that samples the last measurement that falls
+// within a histogram bucket. The histogram bucket upper-boundaries are defined
+// by bounds.
+//
+// The passed bounds will be sorted by this function.
+func Histogram[N int64 | float64](bounds []float64) Reservoir[N] {
+	sort.Float64s(bounds)
+	return &histRes[N]{
+		bounds:  bounds,
+		storage: newStorage[N](len(bounds) + 1),
+	}
+}
+
+type histRes[N int64 | float64] struct {
+	*storage[N]
+
+	// bounds are bucket bounds in ascending order.
+	bounds []float64
+}
+
+func (r *histRes[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute.KeyValue) {
+	r.store[sort.SearchFloat64s(r.bounds, float64(n))] = newMeasurement(ctx, t, n, a)
+}
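+
+// An illustrative sketch (not upstream code) of the slot selection above:
+// sort.SearchFloat64s returns the smallest index i with bounds[i] >= n, so
+// values above the last bound land in the extra final slot. ctx is an
+// ambient context.Context.
+//
+//	res := Histogram[float64]([]float64{10, 100}) // 3 slots: <=10, <=100, >100
+//	res.Offer(ctx, time.Now(), 5, nil)   // slot 0
+//	res.Offer(ctx, time.Now(), 50, nil)  // slot 1
+//	res.Offer(ctx, time.Now(), 500, nil) // slot 2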
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go
new file mode 100644
index 0000000000000000000000000000000000000000..7f9fda5b489084306e6110a9c502e6e68785eebe
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go
@@ -0,0 +1,195 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+	"context"
+	"math"
+	"math/rand"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// rng is used to make sampling decisions.
+//
+// Do not use crypto/rand. There is no reason for the decrease in performance
+// given this is not a security sensitive decision.
+var rng = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+// random returns, as a float64, a uniform pseudo-random number in the open
+// interval (0.0,1.0).
+func random() float64 {
+	// TODO: This does not return a uniform number. rng.Float64 returns a
+	// uniformly random int in [0,2^53) that is divided by 2^53. Meaning it
+	// returns multiples of 2^-53, and not all floating point numbers between 0
+	// and 1 (i.e. for values less than 2^-4 the 4 last bits of the significand
+	// are always going to be 0).
+	//
+	// An alternative algorithm should be considered that will actually return
+	// a uniform number in the interval (0,1). For example, since the default
+	// rand source provides a uniform distribution for Int63, this can be
+	// converted following the prototypical code of Mersenne Twister 64 (Takuji
+	// Nishimura and Makoto Matsumoto:
+	// http://www.math.sci.hiroshima-u.ac.jp/m-mat/MT/VERSIONS/C-LANG/mt19937-64.c)
+	//
+	//   (float64(rng.Int63()>>11) + 0.5) * (1.0 / 4503599627370496.0)
+	//
+	// There are likely many other methods to explore here as well.
+
+	f := rng.Float64()
+	for f == 0 {
+		f = rng.Float64()
+	}
+	return f
+}
+
+// FixedSize returns a [Reservoir] that samples at most k exemplars. If there
+// are k or fewer measurements made, the Reservoir will sample each one. If
+// there are more than k, the Reservoir will then randomly sample each
+// additional measurement with a decreasing probability.
+func FixedSize[N int64 | float64](k int) Reservoir[N] {
+	r := &randRes[N]{storage: newStorage[N](k)}
+	r.reset()
+	return r
+}
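+
+// A usage sketch (illustrative; ctx is an ambient context.Context): no more
+// than k exemplars are retained regardless of how many measurements are
+// offered.
+//
+//	res := FixedSize[float64](2)
+//	for i := 0; i < 1000; i++ {
+//		res.Offer(ctx, time.Now(), float64(i), nil)
+//	}
+//	var dest []metricdata.Exemplar[float64]
+//	res.Collect(&dest) // len(dest) == 2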
+
+type randRes[N int64 | float64] struct {
+	*storage[N]
+
+	// count is the number of measurements seen.
+	count int64
+	// next is the next count that will store a measurement at a random index
+	// once the reservoir has been filled.
+	next int64
+	// w is the largest random number in a distribution that is used to compute
+	// the next value of next.
+	w float64
+}
+
+func (r *randRes[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute.KeyValue) {
+	// The following algorithm is "Algorithm L" from Li, Kim-Hung (4 December
+	// 1994). "Reservoir-Sampling Algorithms of Time Complexity
+	// O(n(1+log(N/n)))". ACM Transactions on Mathematical Software. 20 (4):
+	// 481–493 (https://dl.acm.org/doi/10.1145/198429.198435).
+	//
+	// A high-level overview of "Algorithm L":
+	//   0) Pre-calculate the random count, greater than the storage size, at
+	//      which an exemplar will be replaced.
+	//   1) Accept all measurements offered until the configured storage size is
+	//      reached.
+	//   2) Loop:
+	//      a) When the pre-calculated count is reached, replace a random
+	//         existing exemplar with the offered measurement.
+	//      b) Calculate the next random count greater than the existing one
+	//         which will replace another exemplar.
+	//
+	// The way a "replacement" count is computed is by looking at `n` number of
+	// independent random numbers each corresponding to an offered measurement.
+	// Of these numbers the smallest `k` (the same size as the storage
+	// capacity) of them are kept as a subset. The maximum value in this
+	// subset, called `w` is used to weight another random number generation
+	// for the next count that will be considered.
+	//
+	// By weighting the next count computation as described, the reservoir can
+	// perform a uniformly-weighted sampling algorithm based on the number of
+	// samples the reservoir has seen so far. The sampling will "slow down" as
+	// more and more samples are offered so as to reduce a bias towards those
+	// offered just prior to the end of the collection.
+	//
+	// This algorithm is preferred because of its balance of simplicity and
+	// performance. It will compute three random numbers (the bulk of
+	// computation time) for each item that becomes part of the reservoir, but
+	// it does not spend any time on items that do not. In particular it has an
+	// asymptotic runtime of O(k(1 + log(n/k))) where n is the number of
+	// measurements offered and k is the reservoir size.
+	//
+	// See https://en.wikipedia.org/wiki/Reservoir_sampling for an overview of
+	// this and other reservoir sampling algorithms. See
+	// https://github.com/MrAlias/reservoir-sampling for a performance
+	// comparison of reservoir sampling algorithms.
+
+	if int(r.count) < cap(r.store) {
+		r.store[r.count] = newMeasurement(ctx, t, n, a)
+	} else {
+		if r.count == r.next {
+			// Overwrite a random existing measurement with the one offered.
+			idx := int(rng.Int63n(int64(cap(r.store))))
+			r.store[idx] = newMeasurement(ctx, t, n, a)
+			r.advance()
+		}
+	}
+	r.count++
+}
+
+// reset resets r to the initial state.
+func (r *randRes[N]) reset() {
+	// This resets the number of exemplars known.
+	r.count = 0
+	// Random index inserts should only happen after the storage is full.
+	r.next = int64(cap(r.store))
+
+	// Initial random number in the series used to generate r.next.
+	//
+	// This is set before r.advance to reset or initialize the random number
+	// series. Without doing so it would always be 0 or never restart a new
+	// random number series.
+	//
+	// This maps the uniform random number in (0,1) to a geometric distribution
+	// over the same interval. The mean of the distribution is inversely
+	// proportional to the storage capacity.
+	r.w = math.Exp(math.Log(random()) / float64(cap(r.store)))
+
+	r.advance()
+}
+
+// advance updates the count at which the offered measurement will overwrite an
+// existing exemplar.
+func (r *randRes[N]) advance() {
+	// Calculate the next value in the random number series.
+	//
+	// The current value of r.w is based on the max of a distribution of random
+	// numbers (i.e. `w = max(u_1,u_2,...,u_k)` for `k` equal to the capacity
+	// of the storage and each `u` in the interval (0,w)). To calculate the
+	// next r.w we use the fact that when the next exemplar is selected to be
+	// included in the storage an existing one will be dropped, and the
+	// corresponding random number in the set used to calculate r.w will also
+	// be replaced. The replacement random number will also be within (0,w),
+	// therefore the next r.w will be based on the same distribution (i.e.
+	// `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by
+	// computing the next random number `u` and take r.w as `w * u^(1/k)`.
+	r.w *= math.Exp(math.Log(random()) / float64(cap(r.store)))
+	// Use the new random number in the series to calculate the count of the
+	// next measurement that will be stored.
+	//
+	// Given 0 < r.w < 1, each iteration will result in subsequent r.w being
+	// smaller. This translates here into the next value of next being selected
+	// against a distribution with a higher mean (i.e. the expected value will
+	// increase and replacements become less likely).
+	//
+	// Important to note, the new r.next will always be at least 1 more than
+	// the last r.next.
+	r.next += int64(math.Log(random())/math.Log(1-r.w)) + 1
+}
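+
+// A worked sketch (illustrative) of the skip computation above: for a uniform
+// u in (0,1), int64(math.Log(u)/math.Log(1-w)) is geometrically distributed,
+// so smaller w yields larger expected gaps between replacements.
+//
+//	w, u := 0.1, 0.5
+//	skip := int64(math.Log(u)/math.Log(1-w)) + 1 // 7: next replacement 7 offers later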
+
+func (r *randRes[N]) Collect(dest *[]metricdata.Exemplar[N]) {
+	r.storage.Collect(dest)
+	// Call reset here even though it will reset r.count and restart the random
+	// number series. This will persist any old exemplars as long as no new
+	// measurements are offered, but it will also prioritize those new
+	// measurements that are made over the older collection cycle ones.
+	r.reset()
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go
new file mode 100644
index 0000000000000000000000000000000000000000..7d5276a341167afc18a75554c03c0604ab545a1f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go
@@ -0,0 +1,44 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+	"context"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// Reservoir holds the sampled exemplars of measurements made.
+type Reservoir[N int64 | float64] interface {
+	// Offer accepts the parameters associated with a measurement. The
+	// parameters will be stored as an exemplar if the Reservoir decides to
+	// sample the measurement.
+	//
+	// The passed ctx needs to contain any baggage or span that were active
+	// when the measurement was made. This information may be used by the
+	// Reservoir in making a sampling decision.
+	//
+	// The time t is the time when the measurement was made. The val and attr
+	// parameters are the value and dropped (filtered) attributes of the
+	// measurement respectively.
+	Offer(ctx context.Context, t time.Time, val N, attr []attribute.KeyValue)
+
+	// Collect returns all the held exemplars.
+	//
+	// The Reservoir state is preserved after this call.
+	Collect(dest *[]metricdata.Exemplar[N])
+}
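+
+// An implementation sketch (illustrative, not upstream code): a Reservoir
+// that keeps only the most recent measurement, reusing this package's
+// storage helper (capacity one) for its Collect method.
+//
+//	type lastRes[N int64 | float64] struct{ *storage[N] }
+//
+//	func newLastRes[N int64 | float64]() Reservoir[N] {
+//		return &lastRes[N]{storage: newStorage[N](1)}
+//	}
+//
+//	func (r *lastRes[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute.KeyValue) {
+//		r.store[0] = newMeasurement(ctx, t, n, a)
+//	}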
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go
new file mode 100644
index 0000000000000000000000000000000000000000..e2c2b90a351b698bd06fa1ea735ecea659de4631
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go
@@ -0,0 +1,107 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+	"context"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// storage is an exemplar storage for [Reservoir] implementations.
+type storage[N int64 | float64] struct {
+	// store are the measurements sampled.
+	//
+	// This does not use []metricdata.Exemplar because it potentially would
+	// require an allocation for trace and span IDs in the hot path of Offer.
+	store []measurement[N]
+}
+
+func newStorage[N int64 | float64](n int) *storage[N] {
+	return &storage[N]{store: make([]measurement[N], n)}
+}
+
+// Collect returns all the held exemplars.
+//
+// The Reservoir state is preserved after this call.
+func (r *storage[N]) Collect(dest *[]metricdata.Exemplar[N]) {
+	*dest = reset(*dest, len(r.store), len(r.store))
+	var n int
+	for _, m := range r.store {
+		if !m.valid {
+			continue
+		}
+
+		m.Exemplar(&(*dest)[n])
+		n++
+	}
+	*dest = (*dest)[:n]
+}
+
+// measurement is a measurement made by a telemetry system.
+type measurement[N int64 | float64] struct {
+	// FilteredAttributes are the attributes dropped during the measurement.
+	FilteredAttributes []attribute.KeyValue
+	// Time is the time when the measurement was made.
+	Time time.Time
+	// Value is the value of the measurement.
+	Value N
+	// SpanContext is the SpanContext active when a measurement was made.
+	SpanContext trace.SpanContext
+
+	valid bool
+}
+
+// newMeasurement returns a new non-empty Measurement.
+func newMeasurement[N int64 | float64](ctx context.Context, ts time.Time, v N, droppedAttr []attribute.KeyValue) measurement[N] {
+	return measurement[N]{
+		FilteredAttributes: droppedAttr,
+		Time:               ts,
+		Value:              v,
+		SpanContext:        trace.SpanContextFromContext(ctx),
+		valid:              true,
+	}
+}
+
+// Exemplar returns m as a [metricdata.Exemplar].
+func (m measurement[N]) Exemplar(dest *metricdata.Exemplar[N]) {
+	dest.FilteredAttributes = m.FilteredAttributes
+	dest.Time = m.Time
+	dest.Value = m.Value
+
+	if m.SpanContext.HasTraceID() {
+		traceID := m.SpanContext.TraceID()
+		dest.TraceID = traceID[:]
+	} else {
+		dest.TraceID = dest.TraceID[:0]
+	}
+
+	if m.SpanContext.HasSpanID() {
+		spanID := m.SpanContext.SpanID()
+		dest.SpanID = spanID[:]
+	} else {
+		dest.SpanID = dest.SpanID[:0]
+	}
+}
+
+func reset[T any](s []T, length, capacity int) []T {
+	if cap(s) < capacity {
+		return make([]T, length, capacity)
+	}
+	return s[:length]
+}
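+
+// A brief sketch (illustrative) of reset's reuse semantics; note the reused
+// elements are resliced, not cleared:
+//
+//	s := make([]int, 2, 8)
+//	s = reset(s, 4, 4)   // cap 8 >= 4: reuses the backing array, len(s) == 4
+//	s = reset(s, 16, 16) // cap 8 < 16: allocates a fresh slice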
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go
new file mode 100644
index 0000000000000000000000000000000000000000..9695492b0d111ba288b6c5341c1ddd23370776e5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go
@@ -0,0 +1,24 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"
+
+// ReuseSlice returns a length-n view of slice if its capacity is greater than
+// or equal to n; note the reused elements are not zeroed. Otherwise, it
+// returns a new zero-valued []T with length and capacity equal to n.
+func ReuseSlice[T any](slice []T, n int) []T {
+	if cap(slice) >= n {
+		return slice[:n]
+	}
+	return make([]T, n)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go
new file mode 100644
index 0000000000000000000000000000000000000000..541160f9423ae11ed52f4baade110aecb1f4f35d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go
@@ -0,0 +1,96 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package x contains support for OTel metric SDK experimental features.
+//
+// This package should only be used for features defined in the specification.
+// It should not be used for experiments or new project ideas.
+package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x"
+
+import (
+	"os"
+	"strconv"
+	"strings"
+)
+
+var (
+	// Exemplars is an experimental feature flag that defines if exemplars
+	// should be recorded for metric data-points.
+	//
+	// To enable this feature set the OTEL_GO_X_EXEMPLAR environment variable
+	// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
+	// will also enable this).
+	Exemplars = newFeature("EXEMPLAR", func(v string) (string, bool) {
+		if strings.ToLower(v) == "true" {
+			return v, true
+		}
+		return "", false
+	})
+
+	// CardinalityLimit is an experimental feature flag that defines if
+	// cardinality limits should be applied to the recorded metric data-points.
+	//
+	// To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment
+	// variable to the integer limit value you want to use.
+	//
+	// Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0
+	// will disable the cardinality limits.
+	CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) {
+		n, err := strconv.Atoi(v)
+		if err != nil {
+			return 0, false
+		}
+		return n, true
+	})
+)
+
+// Feature is an experimental feature control flag. It provides a uniform way
+// to interact with these feature flags and parse their values.
+type Feature[T any] struct {
+	key   string
+	parse func(v string) (T, bool)
+}
+
+func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
+	const envKeyRoot = "OTEL_GO_X_"
+	return Feature[T]{
+		key:   envKeyRoot + suffix,
+		parse: parse,
+	}
+}
+
+// Key returns the environment variable key that needs to be set to enable the
+// feature.
+func (f Feature[T]) Key() string { return f.key }
+
+// Lookup returns the user-configured value for the feature and true if the
+// user has enabled the feature. Otherwise, if the feature is not enabled, a
+// zero-value and false are returned.
+func (f Feature[T]) Lookup() (v T, ok bool) {
+	// https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value
+	//
+	// > The SDK MUST interpret an empty value of an environment variable the
+	// > same way as when the variable is unset.
+	vRaw := os.Getenv(f.key)
+	if vRaw == "" {
+		return v, ok
+	}
+	return f.parse(vRaw)
+}
+
+// Enabled returns if the feature is enabled.
+func (f Feature[T]) Enabled() bool {
+	_, ok := f.Lookup()
+	return ok
+}
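+
+// A usage sketch (illustrative) of reading the experimental flags at startup:
+//
+//	if x.Exemplars.Enabled() {
+//		// Record exemplars for metric data-points.
+//	}
+//	if limit, ok := x.CardinalityLimit.Lookup(); ok {
+//		_ = limit // Apply the configured data-point cardinality limit.
+//	}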
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go
new file mode 100644
index 0000000000000000000000000000000000000000..7d524de9ea1497ae042b947cd0c2d1cb3a0563ac
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go
@@ -0,0 +1,214 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// ManualReader is a simple Reader that allows an application to
+// read metrics on demand.
+type ManualReader struct {
+	sdkProducer  atomic.Value
+	shutdownOnce sync.Once
+
+	mu                sync.Mutex
+	isShutdown        bool
+	externalProducers atomic.Value
+
+	temporalitySelector TemporalitySelector
+	aggregationSelector AggregationSelector
+}
+
+// Compile-time check that ManualReader implements Reader and is comparable.
+var _ = map[Reader]struct{}{&ManualReader{}: {}}
+
+// NewManualReader returns a Reader which is directly called to collect metrics.
+func NewManualReader(opts ...ManualReaderOption) *ManualReader {
+	cfg := newManualReaderConfig(opts)
+	r := &ManualReader{
+		temporalitySelector: cfg.temporalitySelector,
+		aggregationSelector: cfg.aggregationSelector,
+	}
+	r.externalProducers.Store(cfg.producers)
+	return r
+}
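+
+// A wiring sketch (illustrative; NewMeterProvider and WithReader are the
+// sdk/metric constructors):
+//
+//	reader := metric.NewManualReader()
+//	provider := metric.NewMeterProvider(metric.WithReader(reader))
+//	// ... record measurements with meters from provider ...
+//	var rm metricdata.ResourceMetrics
+//	err := reader.Collect(context.Background(), &rm)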
+
+// register stores the sdkProducer which enables the caller
+// to read metrics from the SDK on demand.
+func (mr *ManualReader) register(p sdkProducer) {
+	// Only register once. If producer is already set, do nothing.
+	if !mr.sdkProducer.CompareAndSwap(nil, produceHolder{produce: p.produce}) {
+		msg := "did not register manual reader"
+		global.Error(errDuplicateRegister, msg)
+	}
+}
+
+// temporality reports the Temporality for the instrument kind provided.
+func (mr *ManualReader) temporality(kind InstrumentKind) metricdata.Temporality {
+	return mr.temporalitySelector(kind)
+}
+
+// aggregation returns what Aggregation to use for kind.
+func (mr *ManualReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive  // import-shadow for method scoped by type.
+	return mr.aggregationSelector(kind)
+}
+
+// Shutdown closes any connections and frees any resources used by the reader.
+//
+// This method is safe to call concurrently.
+func (mr *ManualReader) Shutdown(context.Context) error {
+	err := ErrReaderShutdown
+	mr.shutdownOnce.Do(func() {
+		// Any future call to Collect will now return ErrReaderShutdown.
+		mr.sdkProducer.Store(produceHolder{
+			produce: shutdownProducer{}.produce,
+		})
+		mr.mu.Lock()
+		defer mr.mu.Unlock()
+		mr.isShutdown = true
+		// release references to Producer(s)
+		mr.externalProducers.Store([]Producer{})
+		err = nil
+	})
+	return err
+}
+
+// Collect gathers all metric data related to the Reader from
+// the SDK and other Producers and stores the result in rm.
+//
+// Collect will return an error if called after shutdown.
+// Collect will return an error if rm is a nil ResourceMetrics.
+// Collect will return an error if the context's Done channel is closed.
+//
+// This method is safe to call concurrently.
+func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+	if rm == nil {
+		return errors.New("manual reader: *metricdata.ResourceMetrics is nil")
+	}
+	p := mr.sdkProducer.Load()
+	if p == nil {
+		return ErrReaderNotRegistered
+	}
+
+	ph, ok := p.(produceHolder)
+	if !ok {
+		// The atomic.Value is entirely in the ManualReader's control so
+		// this should never happen. In the unforeseen case that this does
+		// happen, return an error instead of panicking so a user's code does
+		// not halt in the process.
+		err := fmt.Errorf("manual reader: invalid producer: %T", p)
+		return err
+	}
+
+	err := ph.produce(ctx, rm)
+	if err != nil {
+		return err
+	}
+	var errs []error
+	for _, producer := range mr.externalProducers.Load().([]Producer) {
+		externalMetrics, err := producer.Produce(ctx)
+		if err != nil {
+			errs = append(errs, err)
+		}
+		rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...)
+	}
+
+	global.Debug("ManualReader collection", "Data", rm)
+
+	return unifyErrors(errs)
+}
+
+// MarshalLog returns logging data about the ManualReader.
+func (r *ManualReader) MarshalLog() interface{} {
+	r.mu.Lock()
+	down := r.isShutdown
+	r.mu.Unlock()
+	return struct {
+		Type       string
+		Registered bool
+		Shutdown   bool
+	}{
+		Type:       "ManualReader",
+		Registered: r.sdkProducer.Load() != nil,
+		Shutdown:   down,
+	}
+}
+
+// manualReaderConfig contains configuration options for a ManualReader.
+type manualReaderConfig struct {
+	temporalitySelector TemporalitySelector
+	aggregationSelector AggregationSelector
+	producers           []Producer
+}
+
+// newManualReaderConfig returns a manualReaderConfig configured with options.
+func newManualReaderConfig(opts []ManualReaderOption) manualReaderConfig {
+	cfg := manualReaderConfig{
+		temporalitySelector: DefaultTemporalitySelector,
+		aggregationSelector: DefaultAggregationSelector,
+	}
+	for _, opt := range opts {
+		cfg = opt.applyManual(cfg)
+	}
+	return cfg
+}
+
+// ManualReaderOption applies a configuration option value to a ManualReader.
+type ManualReaderOption interface {
+	applyManual(manualReaderConfig) manualReaderConfig
+}
+
+// WithTemporalitySelector sets the TemporalitySelector a reader will use to
+// determine the Temporality of an instrument based on its kind. If this
+// option is not used, the reader will use the DefaultTemporalitySelector.
+func WithTemporalitySelector(selector TemporalitySelector) ManualReaderOption {
+	return temporalitySelectorOption{selector: selector}
+}
+
+type temporalitySelectorOption struct {
+	selector func(instrument InstrumentKind) metricdata.Temporality
+}
+
+// applyManual returns a manualReaderConfig with option applied.
+func (t temporalitySelectorOption) applyManual(mrc manualReaderConfig) manualReaderConfig {
+	mrc.temporalitySelector = t.selector
+	return mrc
+}
+
+// WithAggregationSelector sets the AggregationSelector a reader will use to
+// determine the aggregation to use for an instrument based on its kind. If
+// this option is not used, the reader will use the DefaultAggregationSelector
+// or the aggregation explicitly passed for a view matching an instrument.
+func WithAggregationSelector(selector AggregationSelector) ManualReaderOption {
+	return aggregationSelectorOption{selector: selector}
+}
+
+type aggregationSelectorOption struct {
+	selector AggregationSelector
+}
+
+// applyManual returns a manualReaderConfig with option applied.
+func (t aggregationSelectorOption) applyManual(c manualReaderConfig) manualReaderConfig {
+	c.aggregationSelector = t.selector
+	return c
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go
new file mode 100644
index 0000000000000000000000000000000000000000..beb7876ec40dc4d83ee57b0b9a31d810ef0990f4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go
@@ -0,0 +1,690 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/embedded"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+
+	"go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+)
+
+// ErrInstrumentName indicates the created instrument has an invalid name.
+// Valid names consist of 255 or fewer alphanumeric, _, ., -, and / characters and must start with a letter.
+var ErrInstrumentName = errors.New("invalid instrument name")
+
+// meter handles the creation and coordination of all metric instruments. A
+// meter represents a single instrumentation scope; all metric telemetry
+// produced by an instrumentation scope will use metric instruments from a
+// single meter.
+type meter struct {
+	embedded.Meter
+
+	scope instrumentation.Scope
+	pipes pipelines
+
+	int64Insts             *cacheWithErr[instID, *int64Inst]
+	float64Insts           *cacheWithErr[instID, *float64Inst]
+	int64ObservableInsts   *cacheWithErr[instID, int64Observable]
+	float64ObservableInsts *cacheWithErr[instID, float64Observable]
+
+	int64Resolver   resolver[int64]
+	float64Resolver resolver[float64]
+}
+
+func newMeter(s instrumentation.Scope, p pipelines) *meter {
+	// viewCache ensures that instrument conflicts this meter is asked to
+	// create, including number conflicts, are logged to the user.
+	var viewCache cache[string, instID]
+
+	var int64Insts cacheWithErr[instID, *int64Inst]
+	var float64Insts cacheWithErr[instID, *float64Inst]
+	var int64ObservableInsts cacheWithErr[instID, int64Observable]
+	var float64ObservableInsts cacheWithErr[instID, float64Observable]
+
+	return &meter{
+		scope:                  s,
+		pipes:                  p,
+		int64Insts:             &int64Insts,
+		float64Insts:           &float64Insts,
+		int64ObservableInsts:   &int64ObservableInsts,
+		float64ObservableInsts: &float64ObservableInsts,
+		int64Resolver:          newResolver[int64](p, &viewCache),
+		float64Resolver:        newResolver[float64](p, &viewCache),
+	}
+}
+
+// Compile-time check meter implements metric.Meter.
+var _ metric.Meter = (*meter)(nil)
+
+// Int64Counter returns a new instrument identified by name and configured with
+// options. The instrument is used to synchronously record increasing int64
+// measurements during a computational operation.
+func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) {
+	cfg := metric.NewInt64CounterConfig(options...)
+	const kind = InstrumentKindCounter
+	p := int64InstProvider{m}
+	i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+	if err != nil {
+		return i, err
+	}
+
+	return i, validateInstrumentName(name)
+}
+
+// Int64UpDownCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to synchronously record
+// int64 measurements during a computational operation.
+func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
+	cfg := metric.NewInt64UpDownCounterConfig(options...)
+	const kind = InstrumentKindUpDownCounter
+	p := int64InstProvider{m}
+	i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+	if err != nil {
+		return i, err
+	}
+
+	return i, validateInstrumentName(name)
+}
+
+// Int64Histogram returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record the
+// distribution of int64 measurements during a computational operation.
+func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
+	cfg := metric.NewInt64HistogramConfig(options...)
+	p := int64InstProvider{m}
+	i, err := p.lookupHistogram(name, cfg)
+	if err != nil {
+		return i, err
+	}
+
+	return i, validateInstrumentName(name)
+}
+
+// int64ObservableInstrument returns a new observable identified by the Instrument.
+// It registers callbacks for each reader's pipeline.
+func (m *meter) int64ObservableInstrument(id Instrument, callbacks []metric.Int64Callback) (int64Observable, error) {
+	key := instID{
+		Name:        id.Name,
+		Description: id.Description,
+		Unit:        id.Unit,
+		Kind:        id.Kind,
+	}
+	if m.int64ObservableInsts.HasKey(key) && len(callbacks) > 0 {
+		warnRepeatedObservableCallbacks(id)
+	}
+	return m.int64ObservableInsts.Lookup(key, func() (int64Observable, error) {
+		inst := newInt64Observable(m, id.Kind, id.Name, id.Description, id.Unit)
+		for _, insert := range m.int64Resolver.inserters {
+			// Connect the measure functions for instruments in this pipeline with the
+			// callbacks for this pipeline.
+			in, err := insert.Instrument(id, insert.readerDefaultAggregation(id.Kind))
+			if err != nil {
+				return inst, err
+			}
+			// Drop aggregation
+			if len(in) == 0 {
+				inst.dropAggregation = true
+				continue
+			}
+			inst.appendMeasures(in)
+			for _, cback := range callbacks {
+				inst := int64Observer{measures: in}
+				fn := cback
+				insert.addCallback(func(ctx context.Context) error { return fn(ctx, inst) })
+			}
+		}
+		return inst, validateInstrumentName(id.Name)
+	})
+}
+
+// Int64ObservableCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// increasing int64 measurements once per measurement collection cycle.
+// Only the measurements recorded during the collection cycle are exported.
+//
+// If Int64ObservableCounter is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
+func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
+	cfg := metric.NewInt64ObservableCounterConfig(options...)
+	id := Instrument{
+		Name:        name,
+		Description: cfg.Description(),
+		Unit:        cfg.Unit(),
+		Kind:        InstrumentKindObservableCounter,
+		Scope:       m.scope,
+	}
+	return m.int64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Int64ObservableUpDownCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// int64 measurements once per measurement collection cycle. Only the
+// measurements recorded during the collection cycle are exported.
+func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
+	cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
+	id := Instrument{
+		Name:        name,
+		Description: cfg.Description(),
+		Unit:        cfg.Unit(),
+		Kind:        InstrumentKindObservableUpDownCounter,
+		Scope:       m.scope,
+	}
+	return m.int64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Int64ObservableGauge returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// instantaneous int64 measurements once per measurement collection cycle.
+// Only the measurements recorded during the collection cycle are exported.
+func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
+	cfg := metric.NewInt64ObservableGaugeConfig(options...)
+	id := Instrument{
+		Name:        name,
+		Description: cfg.Description(),
+		Unit:        cfg.Unit(),
+		Kind:        InstrumentKindObservableGauge,
+		Scope:       m.scope,
+	}
+	return m.int64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Float64Counter returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record increasing
+// float64 measurements during a computational operation.
+func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
+	cfg := metric.NewFloat64CounterConfig(options...)
+	const kind = InstrumentKindCounter
+	p := float64InstProvider{m}
+	i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+	if err != nil {
+		return i, err
+	}
+
+	return i, validateInstrumentName(name)
+}
+
+// Float64UpDownCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to synchronously record
+// float64 measurements during a computational operation.
+func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
+	cfg := metric.NewFloat64UpDownCounterConfig(options...)
+	const kind = InstrumentKindUpDownCounter
+	p := float64InstProvider{m}
+	i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+	if err != nil {
+		return i, err
+	}
+
+	return i, validateInstrumentName(name)
+}
+
+// Float64Histogram returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record the
+// distribution of float64 measurements during a computational operation.
+func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
+	cfg := metric.NewFloat64HistogramConfig(options...)
+	p := float64InstProvider{m}
+	i, err := p.lookupHistogram(name, cfg)
+	if err != nil {
+		return i, err
+	}
+
+	return i, validateInstrumentName(name)
+}
+
+// float64ObservableInstrument returns a new observable identified by the Instrument.
+// It registers callbacks for each reader's pipeline.
+func (m *meter) float64ObservableInstrument(id Instrument, callbacks []metric.Float64Callback) (float64Observable, error) {
+	key := instID{
+		Name:        id.Name,
+		Description: id.Description,
+		Unit:        id.Unit,
+		Kind:        id.Kind,
+	}
+	if m.float64ObservableInsts.HasKey(key) && len(callbacks) > 0 {
+		warnRepeatedObservableCallbacks(id)
+	}
+	return m.float64ObservableInsts.Lookup(key, func() (float64Observable, error) {
+		inst := newFloat64Observable(m, id.Kind, id.Name, id.Description, id.Unit)
+		for _, insert := range m.float64Resolver.inserters {
+			// Connect the measure functions for instruments in this pipeline with the
+			// callbacks for this pipeline.
+			in, err := insert.Instrument(id, insert.readerDefaultAggregation(id.Kind))
+			if err != nil {
+				return inst, err
+			}
+			// Drop aggregation
+			if len(in) == 0 {
+				inst.dropAggregation = true
+				continue
+			}
+			inst.appendMeasures(in)
+			for _, cback := range callbacks {
+				inst := float64Observer{measures: in}
+				fn := cback
+				insert.addCallback(func(ctx context.Context) error { return fn(ctx, inst) })
+			}
+		}
+		return inst, validateInstrumentName(id.Name)
+	})
+}
+
+// Float64ObservableCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// increasing float64 measurements once per measurement collection cycle.
+// Only the measurements recorded during the collection cycle are exported.
+//
+// If Float64ObservableCounter is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
+func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
+	cfg := metric.NewFloat64ObservableCounterConfig(options...)
+	id := Instrument{
+		Name:        name,
+		Description: cfg.Description(),
+		Unit:        cfg.Unit(),
+		Kind:        InstrumentKindObservableCounter,
+		Scope:       m.scope,
+	}
+	return m.float64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Float64ObservableUpDownCounter returns a new instrument identified by name
+// and configured with options. The instrument is used to asynchronously record
+// float64 measurements once per measurement collection cycle. Only the
+// measurements recorded during the collection cycle are exported.
+func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
+	cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
+	id := Instrument{
+		Name:        name,
+		Description: cfg.Description(),
+		Unit:        cfg.Unit(),
+		Kind:        InstrumentKindObservableUpDownCounter,
+		Scope:       m.scope,
+	}
+	return m.float64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Float64ObservableGauge returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// instantaneous float64 measurements once per measurement collection cycle.
+// Only the measurements recorded during the collection cycle are exported.
+func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
+	cfg := metric.NewFloat64ObservableGaugeConfig(options...)
+	id := Instrument{
+		Name:        name,
+		Description: cfg.Description(),
+		Unit:        cfg.Unit(),
+		Kind:        InstrumentKindObservableGauge,
+		Scope:       m.scope,
+	}
+	return m.float64ObservableInstrument(id, cfg.Callbacks())
+}
+
+func validateInstrumentName(name string) error {
+	if len(name) == 0 {
+		return fmt.Errorf("%w: %s: is empty", ErrInstrumentName, name)
+	}
+	if len(name) > 255 {
+		return fmt.Errorf("%w: %s: longer than 255 characters", ErrInstrumentName, name)
+	}
+	if !isAlpha([]rune(name)[0]) {
+		return fmt.Errorf("%w: %s: must start with a letter", ErrInstrumentName, name)
+	}
+	if len(name) == 1 {
+		return nil
+	}
+	for _, c := range name[1:] {
+		if !isAlphanumeric(c) && c != '_' && c != '.' && c != '-' && c != '/' {
+			return fmt.Errorf("%w: %s: must only contain [A-Za-z0-9_.-/]", ErrInstrumentName, name)
+		}
+	}
+	return nil
+}
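+
+// Illustrative outcomes of the validation above:
+//
+//	validateInstrumentName("http.server.duration") // nil
+//	validateInstrumentName("1st_counter")          // ErrInstrumentName: must start with a letter
+//	validateInstrumentName("")                     // ErrInstrumentName: is empty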
+
+func isAlpha(c rune) bool {
+	return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z')
+}
+
+func isAlphanumeric(c rune) bool {
+	return isAlpha(c) || ('0' <= c && c <= '9')
+}
+
+func warnRepeatedObservableCallbacks(id Instrument) {
+	inst := fmt.Sprintf(
+		"Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}",
+		id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit,
+	)
+	global.Warn("Repeated observable instrument creation with callbacks. Ignoring new callbacks. Use meter.RegisterCallback and Registration.Unregister to manage callbacks.",
+		"instrument", inst,
+	)
+}
+
+// RegisterCallback registers f to be called each collection cycle so it will
+// make observations for insts during those cycles.
+//
+// The only instruments f can make observations for are insts. All other
+// observations will be dropped and an error will be logged.
+//
+// Only instruments from this meter can be registered with f; an error is
+// returned if other instruments are provided.
+//
+// Only observations made in the callback will be exported. Unlike synchronous
+// instruments, asynchronous callbacks can "forget" attribute sets that are no
+// longer relevant by omitting the observation during the callback.
+//
+// The returned Registration can be used to unregister f.
+func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
+	if len(insts) == 0 {
+		// Don't allocate an observer if not needed.
+		return noopRegister{}, nil
+	}
+
+	reg := newObserver()
+	var errs multierror
+	for _, inst := range insts {
+		// Unwrap any global.
+		if u, ok := inst.(interface {
+			Unwrap() metric.Observable
+		}); ok {
+			inst = u.Unwrap()
+		}
+
+		switch o := inst.(type) {
+		case int64Observable:
+			if err := o.registerable(m); err != nil {
+				if !errors.Is(err, errEmptyAgg) {
+					errs.append(err)
+				}
+				continue
+			}
+			reg.registerInt64(o.observablID)
+		case float64Observable:
+			if err := o.registerable(m); err != nil {
+				if !errors.Is(err, errEmptyAgg) {
+					errs.append(err)
+				}
+				continue
+			}
+			reg.registerFloat64(o.observablID)
+		default:
+			// Instrument external to the SDK.
+			return nil, errors.New("invalid observable: from different implementation")
+		}
+	}
+
+	err := errs.errorOrNil()
+	if reg.len() == 0 {
+		// All insts use drop aggregation or are invalid.
+		return noopRegister{}, err
+	}
+
+	// Some or all instruments were valid.
+	cback := func(ctx context.Context) error { return f(ctx, reg) }
+	return m.pipes.registerMultiCallback(cback), err
+}
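+
+// A registration sketch (illustrative; the gauge and the queueLen helper are
+// hypothetical):
+//
+//	gauge, _ := meter.Int64ObservableGauge("queue.length")
+//	reg, _ := meter.RegisterCallback(func(ctx context.Context, o metric.Observer) error {
+//		o.ObserveInt64(gauge, queueLen())
+//		return nil
+//	}, gauge)
+//	// Later, stop observing: reg.Unregister()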
+
+type observer struct {
+	embedded.Observer
+
+	float64 map[observablID[float64]]struct{}
+	int64   map[observablID[int64]]struct{}
+}
+
+func newObserver() observer {
+	return observer{
+		float64: make(map[observablID[float64]]struct{}),
+		int64:   make(map[observablID[int64]]struct{}),
+	}
+}
+
+func (r observer) len() int {
+	return len(r.float64) + len(r.int64)
+}
+
+func (r observer) registerFloat64(id observablID[float64]) {
+	r.float64[id] = struct{}{}
+}
+
+func (r observer) registerInt64(id observablID[int64]) {
+	r.int64[id] = struct{}{}
+}
+
+var (
+	errUnknownObserver = errors.New("unknown observable instrument")
+	errUnregObserver   = errors.New("observable instrument not registered for callback")
+)
+
+func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ...metric.ObserveOption) {
+	var oImpl float64Observable
+	switch conv := o.(type) {
+	case float64Observable:
+		oImpl = conv
+	case interface {
+		Unwrap() metric.Observable
+	}:
+		// Unwrap any global.
+		async := conv.Unwrap()
+		var ok bool
+		if oImpl, ok = async.(float64Observable); !ok {
+			global.Error(errUnknownObserver, "failed to record asynchronous")
+			return
+		}
+	default:
+		global.Error(errUnknownObserver, "failed to record")
+		return
+	}
+
+	if _, registered := r.float64[oImpl.observablID]; !registered {
+		if !oImpl.dropAggregation {
+			global.Error(errUnregObserver, "failed to record",
+				"name", oImpl.name,
+				"description", oImpl.description,
+				"unit", oImpl.unit,
+				"number", fmt.Sprintf("%T", float64(0)),
+			)
+		}
+		return
+	}
+	c := metric.NewObserveConfig(opts)
+	oImpl.observe(v, c.Attributes())
+}
+
+func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric.ObserveOption) {
+	var oImpl int64Observable
+	switch conv := o.(type) {
+	case int64Observable:
+		oImpl = conv
+	case interface {
+		Unwrap() metric.Observable
+	}:
+		// Unwrap any global.
+		async := conv.Unwrap()
+		var ok bool
+		if oImpl, ok = async.(int64Observable); !ok {
+			global.Error(errUnknownObserver, "failed to record asynchronous")
+			return
+		}
+	default:
+		global.Error(errUnknownObserver, "failed to record")
+		return
+	}
+
+	if _, registered := r.int64[oImpl.observablID]; !registered {
+		if !oImpl.dropAggregation {
+			global.Error(errUnregObserver, "failed to record",
+				"name", oImpl.name,
+				"description", oImpl.description,
+				"unit", oImpl.unit,
+				"number", fmt.Sprintf("%T", int64(0)),
+			)
+		}
+		return
+	}
+	c := metric.NewObserveConfig(opts)
+	oImpl.observe(v, c.Attributes())
+}
+
+type noopRegister struct{ embedded.Registration }
+
+func (noopRegister) Unregister() error {
+	return nil
+}
+
+// int64InstProvider provides int64 OpenTelemetry instruments.
+type int64InstProvider struct{ *meter }
+
+func (p int64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]aggregate.Measure[int64], error) {
+	inst := Instrument{
+		Name:        name,
+		Description: desc,
+		Unit:        u,
+		Kind:        kind,
+		Scope:       p.scope,
+	}
+	return p.int64Resolver.Aggregators(inst)
+}
+
+func (p int64InstProvider) histogramAggs(name string, cfg metric.Int64HistogramConfig) ([]aggregate.Measure[int64], error) {
+	boundaries := cfg.ExplicitBucketBoundaries()
+	aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err()
+	if aggError != nil {
+		// If boundaries are invalid, ignore them.
+		boundaries = nil
+	}
+	inst := Instrument{
+		Name:        name,
+		Description: cfg.Description(),
+		Unit:        cfg.Unit(),
+		Kind:        InstrumentKindHistogram,
+		Scope:       p.scope,
+	}
+	measures, err := p.int64Resolver.HistogramAggregators(inst, boundaries)
+	return measures, errors.Join(aggError, err)
+}
+
+// lookup returns the resolved instrumentImpl.
+func (p int64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*int64Inst, error) {
+	return p.meter.int64Insts.Lookup(instID{
+		Name:        name,
+		Description: desc,
+		Unit:        u,
+		Kind:        kind,
+	}, func() (*int64Inst, error) {
+		aggs, err := p.aggs(kind, name, desc, u)
+		return &int64Inst{measures: aggs}, err
+	})
+}
+
+// lookupHistogram returns the resolved instrumentImpl.
+func (p int64InstProvider) lookupHistogram(name string, cfg metric.Int64HistogramConfig) (*int64Inst, error) {
+	return p.meter.int64Insts.Lookup(instID{
+		Name:        name,
+		Description: cfg.Description(),
+		Unit:        cfg.Unit(),
+		Kind:        InstrumentKindHistogram,
+	}, func() (*int64Inst, error) {
+		aggs, err := p.histogramAggs(name, cfg)
+		return &int64Inst{measures: aggs}, err
+	})
+}
+
+// float64InstProvider provides float64 OpenTelemetry instruments.
+type float64InstProvider struct{ *meter }
+
+func (p float64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]aggregate.Measure[float64], error) {
+	inst := Instrument{
+		Name:        name,
+		Description: desc,
+		Unit:        u,
+		Kind:        kind,
+		Scope:       p.scope,
+	}
+	return p.float64Resolver.Aggregators(inst)
+}
+
+func (p float64InstProvider) histogramAggs(name string, cfg metric.Float64HistogramConfig) ([]aggregate.Measure[float64], error) {
+	boundaries := cfg.ExplicitBucketBoundaries()
+	aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err()
+	if aggError != nil {
+		// If boundaries are invalid, ignore them.
+		boundaries = nil
+	}
+	inst := Instrument{
+		Name:        name,
+		Description: cfg.Description(),
+		Unit:        cfg.Unit(),
+		Kind:        InstrumentKindHistogram,
+		Scope:       p.scope,
+	}
+	measures, err := p.float64Resolver.HistogramAggregators(inst, boundaries)
+	return measures, errors.Join(aggError, err)
+}
+
+// lookup returns the resolved instrumentImpl.
+func (p float64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*float64Inst, error) {
+	return p.meter.float64Insts.Lookup(instID{
+		Name:        name,
+		Description: desc,
+		Unit:        u,
+		Kind:        kind,
+	}, func() (*float64Inst, error) {
+		aggs, err := p.aggs(kind, name, desc, u)
+		return &float64Inst{measures: aggs}, err
+	})
+}
+
+// lookupHistogram returns the resolved instrumentImpl.
+func (p float64InstProvider) lookupHistogram(name string, cfg metric.Float64HistogramConfig) (*float64Inst, error) {
+	return p.meter.float64Insts.Lookup(instID{
+		Name:        name,
+		Description: cfg.Description(),
+		Unit:        cfg.Unit(),
+		Kind:        InstrumentKindHistogram,
+	}, func() (*float64Inst, error) {
+		aggs, err := p.histogramAggs(name, cfg)
+		return &float64Inst{measures: aggs}, err
+	})
+}
+
+type int64Observer struct {
+	embedded.Int64Observer
+	measures[int64]
+}
+
+func (o int64Observer) Observe(val int64, opts ...metric.ObserveOption) {
+	c := metric.NewObserveConfig(opts)
+	o.observe(val, c.Attributes())
+}
+
+type float64Observer struct {
+	embedded.Float64Observer
+	measures[float64]
+}
+
+func (o float64Observer) Observe(val float64, opts ...metric.ObserveOption) {
+	c := metric.NewObserveConfig(opts)
+	o.observe(val, c.Attributes())
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go
new file mode 100644
index 0000000000000000000000000000000000000000..32c17934fc47d14882f4c499bad7b599ec5cd2fd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go
@@ -0,0 +1,307 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata"
+
+import (
+	"encoding/json"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+)
+
+// ResourceMetrics is a collection of ScopeMetrics and the associated Resource
+// that created them.
+type ResourceMetrics struct {
+	// Resource represents the entity that collected the metrics.
+	Resource *resource.Resource
+	// ScopeMetrics are the collection of metrics with unique Scopes.
+	ScopeMetrics []ScopeMetrics
+}
+
+// ScopeMetrics is a collection of Metrics produced by a Meter.
+type ScopeMetrics struct {
+	// Scope is the Scope that the Meter was created with.
+	Scope instrumentation.Scope
+	// Metrics are a list of aggregations created by the Meter.
+	Metrics []Metrics
+}
+
+// Metrics is a collection of one or more aggregated timeseries from an Instrument.
+type Metrics struct {
+	// Name is the name of the Instrument that created this data.
+	Name string
+	// Description is the description of the Instrument, which can be used in documentation.
+	Description string
+	// Unit is the unit in which the Instrument reports.
+	Unit string
+	// Data is the aggregated data from an Instrument.
+	Data Aggregation
+}
+
+// Aggregation is the store of data reported by an Instrument.
+// It will be one of: Gauge, Sum, Histogram, ExponentialHistogram, or Summary.
+type Aggregation interface {
+	privateAggregation()
+}
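+
+// Consumers typically type-switch on the concrete aggregation type. A minimal
+// sketch, assuming m is a Metrics value backed by an int64 instrument:
+//
+//	switch a := m.Data.(type) {
+//	case Sum[int64]:
+//		_ = a.IsMonotonic // Sum-specific field.
+//	case Histogram[int64]:
+//		_ = a.DataPoints
+//	}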
+
+// Gauge represents a measurement of the current value of an instrument.
+type Gauge[N int64 | float64] struct {
+	// DataPoints are the individual aggregated measurements with unique
+	// Attributes.
+	DataPoints []DataPoint[N]
+}
+
+func (Gauge[N]) privateAggregation() {}
+
+// Sum represents the sum of all measurements of values from an instrument.
+type Sum[N int64 | float64] struct {
+	// DataPoints are the individual aggregated measurements with unique
+	// Attributes.
+	DataPoints []DataPoint[N]
+	// Temporality describes if the aggregation is reported as the change from the
+	// last report time, or the cumulative changes since a fixed start time.
+	Temporality Temporality
+	// IsMonotonic represents if this aggregation only increases or decreases.
+	IsMonotonic bool
+}
+
+func (Sum[N]) privateAggregation() {}
+
+// DataPoint is a single data point in a timeseries.
+type DataPoint[N int64 | float64] struct {
+	// Attributes is the set of key value pairs that uniquely identify the
+	// timeseries.
+	Attributes attribute.Set
+	// StartTime is when the timeseries was started. (optional)
+	StartTime time.Time `json:",omitempty"`
+	// Time is the time when the timeseries was recorded. (optional)
+	Time time.Time `json:",omitempty"`
+	// Value is the value of this data point.
+	Value N
+
+	// Exemplars is the sampled Exemplars collected during the timeseries.
+	Exemplars []Exemplar[N] `json:",omitempty"`
+}
+
+// Histogram represents the histogram of all measurements of values from an instrument.
+type Histogram[N int64 | float64] struct {
+	// DataPoints are the individual aggregated measurements with unique
+	// Attributes.
+	DataPoints []HistogramDataPoint[N]
+	// Temporality describes if the aggregation is reported as the change from the
+	// last report time, or the cumulative changes since a fixed start time.
+	Temporality Temporality
+}
+
+func (Histogram[N]) privateAggregation() {}
+
+// HistogramDataPoint is a single histogram data point in a timeseries.
+type HistogramDataPoint[N int64 | float64] struct {
+	// Attributes is the set of key value pairs that uniquely identify the
+	// timeseries.
+	Attributes attribute.Set
+	// StartTime is when the timeseries was started.
+	StartTime time.Time
+	// Time is the time when the timeseries was recorded.
+	Time time.Time
+
+	// Count is the number of updates this histogram has been calculated with.
+	Count uint64
+	// Bounds are the upper bounds of the buckets of the histogram. Because the
+	// last boundary is +infinity, it is implied and not stored here.
+	Bounds []float64
+	// BucketCounts is the count of each of the buckets.
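+	// For example, Bounds [0, 5, 10] describes the buckets (-∞, 0], (0, 5],
+	// (5, 10], and (10, +∞), so BucketCounts holds len(Bounds)+1 entries.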
+	BucketCounts []uint64
+
+	// Min is the minimum value recorded. (optional)
+	Min Extrema[N]
+	// Max is the maximum value recorded. (optional)
+	Max Extrema[N]
+	// Sum is the sum of the values recorded.
+	Sum N
+
+	// Exemplars is the sampled Exemplars collected during the timeseries.
+	Exemplars []Exemplar[N] `json:",omitempty"`
+}
+
+// ExponentialHistogram represents the histogram of all measurements of values from an instrument.
+type ExponentialHistogram[N int64 | float64] struct {
+	// DataPoints are the individual aggregated measurements with unique
+	// attributes.
+	DataPoints []ExponentialHistogramDataPoint[N]
+	// Temporality describes if the aggregation is reported as the change from the
+	// last report time, or the cumulative changes since a fixed start time.
+	Temporality Temporality
+}
+
+func (ExponentialHistogram[N]) privateAggregation() {}
+
+// ExponentialHistogramDataPoint is a single exponential histogram data point in a timeseries.
+type ExponentialHistogramDataPoint[N int64 | float64] struct {
+	// Attributes is the set of key value pairs that uniquely identify the
+	// timeseries.
+	Attributes attribute.Set
+	// StartTime is when the timeseries was started.
+	StartTime time.Time
+	// Time is the time when the timeseries was recorded.
+	Time time.Time
+
+	// Count is the number of updates this histogram has been calculated with.
+	Count uint64
+	// Min is the minimum value recorded. (optional)
+	Min Extrema[N]
+	// Max is the maximum value recorded. (optional)
+	Max Extrema[N]
+	// Sum is the sum of the values recorded.
+	Sum N
+
+	// Scale describes the resolution of the histogram. Boundaries are
+	// located at powers of the base, where:
+	//
+	//   base = 2 ^ (2 ^ -Scale)
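+	//
+	// For example, Scale 0 yields base 2, Scale 1 yields base √2 ≈ 1.414,
+	// and Scale -1 yields base 4.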
+	Scale int32
+	// ZeroCount is the number of values whose absolute value
+	// is less than or equal to [ZeroThreshold].
+	// When ZeroThreshold is 0, this is the number of values that
+	// cannot be expressed using the standard exponential formula
+	// as well as values that have been rounded to zero.
+	// ZeroCount represents the special zero count bucket.
+	ZeroCount uint64
+
+	// PositiveBucket is the range of positive value bucket counts.
+	PositiveBucket ExponentialBucket
+	// NegativeBucket is the range of negative value bucket counts.
+	NegativeBucket ExponentialBucket
+
+	// ZeroThreshold is the width of the zero region. Where the zero region is
+	// defined as the closed interval [-ZeroThreshold, ZeroThreshold].
+	ZeroThreshold float64
+
+	// Exemplars is the sampled Exemplars collected during the timeseries.
+	Exemplars []Exemplar[N] `json:",omitempty"`
+}
+
+// ExponentialBucket is a set of bucket counts, encoded in a contiguous array
+// of counts.
+type ExponentialBucket struct {
+	// Offset is the bucket index of the first entry in the Counts slice.
+	Offset int32
+	// Counts is a slice where Counts[i] carries the count of the bucket at
+	// index (Offset+i). Counts[i] is the count of values greater than
+	// base^(Offset+i) and less than or equal to base^(Offset+i+1).
+	Counts []uint64
+}
+
+// Extrema is the minimum or maximum value of a dataset.
+type Extrema[N int64 | float64] struct {
+	value N
+	valid bool
+}
+
+// MarshalText converts the Extrema value to text.
+func (e Extrema[N]) MarshalText() ([]byte, error) {
+	if !e.valid {
+		return json.Marshal(nil)
+	}
+	return json.Marshal(e.value)
+}
+
+// MarshalJSON converts the Extrema value to a JSON number.
+func (e *Extrema[N]) MarshalJSON() ([]byte, error) {
+	return e.MarshalText()
+}
+
+// NewExtrema returns an Extrema set to v.
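+//
+// A minimal usage sketch:
+//
+//	m := NewExtrema[int64](3)
+//	if v, defined := m.Value(); defined {
+//		fmt.Println(v) // Prints 3.
+//	}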
+func NewExtrema[N int64 | float64](v N) Extrema[N] {
+	return Extrema[N]{value: v, valid: true}
+}
+
+// Value returns the Extrema value and true if the Extrema is defined.
+// Otherwise, if the Extrema is its zero-value, defined will be false.
+func (e Extrema[N]) Value() (v N, defined bool) {
+	return e.value, e.valid
+}
+
+// Exemplar is a measurement sampled from a timeseries providing a typical
+// example.
+type Exemplar[N int64 | float64] struct {
+	// FilteredAttributes are the attributes recorded with the measurement but
+	// filtered out of the timeseries' aggregated data.
+	FilteredAttributes []attribute.KeyValue
+	// Time is the time when the measurement was recorded.
+	Time time.Time
+	// Value is the measured value.
+	Value N
+	// SpanID is the ID of the span that was active during the measurement. If
+	// no span was active or the span was not sampled this will be empty.
+	SpanID []byte `json:",omitempty"`
+	// TraceID is the ID of the trace the active span belonged to during the
+	// measurement. If no span was active or the span was not sampled this will
+	// be empty.
+	TraceID []byte `json:",omitempty"`
+}
+
+// Summary metric data are used to convey quantile summaries,
+// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
+// data type.
+//
+// These data points cannot always be merged in a meaningful way. The Summary
+// type is only used by bridges from other metrics libraries, and cannot be
+// produced using OpenTelemetry instrumentation.
+type Summary struct {
+	// DataPoints are the individual aggregated measurements with unique
+	// attributes.
+	DataPoints []SummaryDataPoint
+}
+
+func (Summary) privateAggregation() {}
+
+// SummaryDataPoint is a single data point in a timeseries that describes the
+// time-varying values of a Summary metric.
+type SummaryDataPoint struct {
+	// Attributes is the set of key value pairs that uniquely identify the
+	// timeseries.
+	Attributes attribute.Set
+
+	// StartTime is when the timeseries was started.
+	StartTime time.Time
+	// Time is the time when the timeseries was recorded.
+	Time time.Time
+
+	// Count is the number of updates this summary has been calculated with.
+	Count uint64
+
+	// Sum is the sum of the values recorded.
+	Sum float64
+
+	// (Optional) list of values at different quantiles of the distribution calculated
+	// from the current snapshot. The quantiles must be strictly increasing.
+	QuantileValues []QuantileValue
+}
+
+// QuantileValue is the value at a given quantile of a summary.
+type QuantileValue struct {
+	// Quantile is the quantile of this value.
+	//
+	// Must be in the interval [0.0, 1.0].
+	Quantile float64
+
+	// Value is the value at the given quantile of a summary.
+	//
+	// Quantile values must NOT be negative.
+	Value float64
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go
new file mode 100644
index 0000000000000000000000000000000000000000..9fceb18cba7ae9ebbde41089dca26ca6a2f0d2bf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go
@@ -0,0 +1,41 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:generate stringer -type=Temporality
+
+package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata"
+
+// Temporality defines the window that an aggregation was calculated over.
+type Temporality uint8
+
+const (
+	// undefinedTemporality represents an unset Temporality.
+	//nolint:deadcode,unused,varcheck
+	undefinedTemporality Temporality = iota
+
+	// CumulativeTemporality defines a measurement interval that continues to
+	// expand forward in time from a starting point. New measurements are
+	// added to all previous measurements since a start time.
+	CumulativeTemporality
+
+	// DeltaTemporality defines a measurement interval that resets each cycle.
+	// Measurements from one cycle are recorded independently, measurements
+	// from other cycles do not affect them.
+	DeltaTemporality
+)
+
+// MarshalText returns the text encoding of t.
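+//
+// For example, CumulativeTemporality marshals to the text
+// "CumulativeTemporality" via the stringer-generated String method.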
+func (t Temporality) MarshalText() ([]byte, error) {
+	return []byte(t.String()), nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go
new file mode 100644
index 0000000000000000000000000000000000000000..4da833cdce20c1e48abf4fec30075ac8324ea9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go
@@ -0,0 +1,25 @@
+// Code generated by "stringer -type=Temporality"; DO NOT EDIT.
+
+package metricdata
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[undefinedTemporality-0]
+	_ = x[CumulativeTemporality-1]
+	_ = x[DeltaTemporality-2]
+}
+
+const _Temporality_name = "undefinedTemporalityCumulativeTemporalityDeltaTemporality"
+
+var _Temporality_index = [...]uint8{0, 20, 41, 57}
+
+func (i Temporality) String() string {
+	if i >= Temporality(len(_Temporality_index)-1) {
+		return "Temporality(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _Temporality_name[_Temporality_index[i]:_Temporality_index[i+1]]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
new file mode 100644
index 0000000000000000000000000000000000000000..ff86999c75954dc05de3741fe733f95ace3c63d3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
@@ -0,0 +1,381 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// Default periodic reader timing.
+const (
+	defaultTimeout  = time.Millisecond * 30000
+	defaultInterval = time.Millisecond * 60000
+)
+
+// periodicReaderConfig contains configuration options for a PeriodicReader.
+type periodicReaderConfig struct {
+	interval  time.Duration
+	timeout   time.Duration
+	producers []Producer
+}
+
+// newPeriodicReaderConfig returns a periodicReaderConfig configured with
+// options.
+func newPeriodicReaderConfig(options []PeriodicReaderOption) periodicReaderConfig {
+	c := periodicReaderConfig{
+		interval: envDuration(envInterval, defaultInterval),
+		timeout:  envDuration(envTimeout, defaultTimeout),
+	}
+	for _, o := range options {
+		c = o.applyPeriodic(c)
+	}
+	return c
+}
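+
+// For example, assuming the standard SDK environment variables, setting
+// OTEL_METRIC_EXPORT_INTERVAL=30000 (milliseconds) in the environment and
+// passing no WithInterval option yields a 30 second export interval.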
+
+// PeriodicReaderOption applies a configuration option value to a PeriodicReader.
+type PeriodicReaderOption interface {
+	applyPeriodic(periodicReaderConfig) periodicReaderConfig
+}
+
+// periodicReaderOptionFunc applies a set of options to a periodicReaderConfig.
+type periodicReaderOptionFunc func(periodicReaderConfig) periodicReaderConfig
+
+// applyPeriodic returns a periodicReaderConfig with option(s) applied.
+func (o periodicReaderOptionFunc) applyPeriodic(conf periodicReaderConfig) periodicReaderConfig {
+	return o(conf)
+}
+
+// WithTimeout configures the time a PeriodicReader waits for an export to
+// complete before canceling it. This includes an export which occurs as part
+// of Shutdown or ForceFlush if the user-passed context does not have a
+// deadline. If the user-passed context does have a deadline, it will be used
+// instead.
+//
+// This option overrides any value set for the
+// OTEL_METRIC_EXPORT_TIMEOUT environment variable.
+//
+// If this option is not used or d is less than or equal to zero, 30 seconds
+// is used as the default.
+func WithTimeout(d time.Duration) PeriodicReaderOption {
+	return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig {
+		if d <= 0 {
+			return conf
+		}
+		conf.timeout = d
+		return conf
+	})
+}
+
+// WithInterval configures the intervening time between exports for a
+// PeriodicReader.
+//
+// This option overrides any value set for the
+// OTEL_METRIC_EXPORT_INTERVAL environment variable.
+//
+// If this option is not used or d is less than or equal to zero, 60 seconds
+// is used as the default.
+func WithInterval(d time.Duration) PeriodicReaderOption {
+	return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig {
+		if d <= 0 {
+			return conf
+		}
+		conf.interval = d
+		return conf
+	})
+}
+
+// NewPeriodicReader returns a Reader that collects and exports metric data to
+// the exporter at a defined interval. By default, the returned Reader will
+// collect and export data every 60 seconds, and will cancel any attempts that
+// exceed 30 seconds, collect and export combined. The collect and export time
+// are not counted towards the interval between attempts.
+//
+// The Collect method of the returned Reader continues to gather and return
+// metric data to the user. It will not automatically send that data to the
+// exporter. That is left to the user to accomplish.
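+//
+// A minimal construction sketch (exp is assumed to be an Exporter
+// implementation, e.g. an OTLP metric exporter):
+//
+//	reader := metric.NewPeriodicReader(exp,
+//		metric.WithInterval(15*time.Second),
+//		metric.WithTimeout(5*time.Second),
+//	)
+//	provider := metric.NewMeterProvider(metric.WithReader(reader))
+//	defer func() { _ = provider.Shutdown(context.Background()) }()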
+func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *PeriodicReader {
+	conf := newPeriodicReaderConfig(options)
+	ctx, cancel := context.WithCancel(context.Background())
+	r := &PeriodicReader{
+		interval: conf.interval,
+		timeout:  conf.timeout,
+		exporter: exporter,
+		flushCh:  make(chan chan error),
+		cancel:   cancel,
+		done:     make(chan struct{}),
+		rmPool: sync.Pool{
+			New: func() interface{} {
+				return &metricdata.ResourceMetrics{}
+			},
+		},
+	}
+	r.externalProducers.Store(conf.producers)
+
+	go func() {
+		defer func() { close(r.done) }()
+		r.run(ctx, conf.interval)
+	}()
+
+	return r
+}
+
+// PeriodicReader is a Reader that continuously collects and exports metric
+// data at a set interval.
+type PeriodicReader struct {
+	sdkProducer atomic.Value
+
+	mu                sync.Mutex
+	isShutdown        bool
+	externalProducers atomic.Value
+
+	interval time.Duration
+	timeout  time.Duration
+	exporter Exporter
+	flushCh  chan chan error
+
+	done         chan struct{}
+	cancel       context.CancelFunc
+	shutdownOnce sync.Once
+
+	rmPool sync.Pool
+}
+
+// Compile-time check that the PeriodicReader implements Reader and is comparable.
+var _ = map[Reader]struct{}{&PeriodicReader{}: {}}
+
+// newTicker allows testing override.
+var newTicker = time.NewTicker
+
+// run continuously collects and exports metric data at the specified
+// interval. This will run until ctx is canceled or times out.
+func (r *PeriodicReader) run(ctx context.Context, interval time.Duration) {
+	ticker := newTicker(interval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			err := r.collectAndExport(ctx)
+			if err != nil {
+				otel.Handle(err)
+			}
+		case errCh := <-r.flushCh:
+			errCh <- r.collectAndExport(ctx)
+			ticker.Reset(interval)
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+// register registers p as the producer of this reader.
+func (r *PeriodicReader) register(p sdkProducer) {
+	// Only register once. If producer is already set, do nothing.
+	if !r.sdkProducer.CompareAndSwap(nil, produceHolder{produce: p.produce}) {
+		msg := "did not register periodic reader"
+		global.Error(errDuplicateRegister, msg)
+	}
+}
+
+// temporality reports the Temporality for the instrument kind provided.
+func (r *PeriodicReader) temporality(kind InstrumentKind) metricdata.Temporality {
+	return r.exporter.Temporality(kind)
+}
+
+// aggregation returns what Aggregation to use for kind.
+func (r *PeriodicReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive  // import-shadow for method scoped by type.
+	return r.exporter.Aggregation(kind)
+}
+
+// collectAndExport gathers all metric data related to the PeriodicReader r from
+// the SDK and exports it with r's exporter.
+func (r *PeriodicReader) collectAndExport(ctx context.Context) error {
+	ctx, cancel := context.WithTimeout(ctx, r.timeout)
+	defer cancel()
+
+	// Reuse a ResourceMetrics from the pool rather than allocating one on every collection.
+	rm := r.rmPool.Get().(*metricdata.ResourceMetrics)
+	err := r.Collect(ctx, rm)
+	if err == nil {
+		err = r.export(ctx, rm)
+	}
+	r.rmPool.Put(rm)
+	return err
+}
+
+// Collect gathers all metric data related to the Reader from
+// the SDK and other Producers and stores the result in rm. The metric
+// data is not exported to the configured exporter, it is left to the caller to
+// handle that if desired.
+//
+// Collect will return an error if called after shutdown.
+// Collect will return an error if rm is a nil ResourceMetrics.
+// Collect will return an error if the context's Done channel is closed.
+//
+// This method is safe to call concurrently.
+func (r *PeriodicReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+	if rm == nil {
+		return errors.New("periodic reader: *metricdata.ResourceMetrics is nil")
+	}
+	// Delegate to the registered producer, writing the results into rm.
+	return r.collect(ctx, r.sdkProducer.Load(), rm)
+}
+
+// collect unwraps p as a produceHolder and returns its produce results.
+func (r *PeriodicReader) collect(ctx context.Context, p interface{}, rm *metricdata.ResourceMetrics) error {
+	if p == nil {
+		return ErrReaderNotRegistered
+	}
+
+	ph, ok := p.(produceHolder)
+	if !ok {
+		// The atomic.Value is entirely in the PeriodicReader's control so
+		// this should never happen. In the unforeseen case that it does,
+		// return an error instead of panicking so a user's code does not
+		// halt in the process.
+		err := fmt.Errorf("periodic reader: invalid producer: %T", p)
+		return err
+	}
+
+	err := ph.produce(ctx, rm)
+	if err != nil {
+		return err
+	}
+	var errs []error
+	for _, producer := range r.externalProducers.Load().([]Producer) {
+		externalMetrics, err := producer.Produce(ctx)
+		if err != nil {
+			errs = append(errs, err)
+		}
+		rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...)
+	}
+
+	global.Debug("PeriodicReader collection", "Data", rm)
+
+	return unifyErrors(errs)
+}
+
+// export exports metric data m using r's exporter.
+func (r *PeriodicReader) export(ctx context.Context, m *metricdata.ResourceMetrics) error {
+	return r.exporter.Export(ctx, m)
+}
+
+// ForceFlush flushes pending telemetry.
+//
+// This method is safe to call concurrently.
+func (r *PeriodicReader) ForceFlush(ctx context.Context) error {
+	// Prioritize the ctx timeout if it is set.
+	if _, ok := ctx.Deadline(); !ok {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, r.timeout)
+		defer cancel()
+	}
+
+	errCh := make(chan error, 1)
+	select {
+	case r.flushCh <- errCh:
+		select {
+		case err := <-errCh:
+			if err != nil {
+				return err
+			}
+			close(errCh)
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+	case <-r.done:
+		return ErrReaderShutdown
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+	return r.exporter.ForceFlush(ctx)
+}
+
+// Shutdown flushes pending telemetry and then stops the export pipeline.
+//
+// This method is safe to call concurrently.
+func (r *PeriodicReader) Shutdown(ctx context.Context) error {
+	err := ErrReaderShutdown
+	r.shutdownOnce.Do(func() {
+		// Prioritize the ctx timeout if it is set.
+		if _, ok := ctx.Deadline(); !ok {
+			var cancel context.CancelFunc
+			ctx, cancel = context.WithTimeout(ctx, r.timeout)
+			defer cancel()
+		}
+
+		// Stop the run loop.
+		r.cancel()
+		<-r.done
+
+		// Any future call to Collect will now return ErrReaderShutdown.
+		ph := r.sdkProducer.Swap(produceHolder{
+			produce: shutdownProducer{}.produce,
+		})
+
+		if ph != nil { // Reader was registered.
+			// Flush pending telemetry.
+			m := r.rmPool.Get().(*metricdata.ResourceMetrics)
+			err = r.collect(ctx, ph, m)
+			if err == nil {
+				err = r.export(ctx, m)
+			}
+			r.rmPool.Put(m)
+		}
+
+		sErr := r.exporter.Shutdown(ctx)
+		if err == nil || err == ErrReaderShutdown {
+			err = sErr
+		}
+
+		r.mu.Lock()
+		defer r.mu.Unlock()
+		r.isShutdown = true
+		// release references to Producer(s)
+		r.externalProducers.Store([]Producer{})
+	})
+	return err
+}
+
+// MarshalLog returns logging data about the PeriodicReader.
+func (r *PeriodicReader) MarshalLog() interface{} {
+	r.mu.Lock()
+	down := r.isShutdown
+	r.mu.Unlock()
+	return struct {
+		Type       string
+		Exporter   Exporter
+		Registered bool
+		Shutdown   bool
+		Interval   time.Duration
+		Timeout    time.Duration
+	}{
+		Type:       "PeriodicReader",
+		Exporter:   r.exporter,
+		Registered: r.sdkProducer.Load() != nil,
+		Shutdown:   down,
+		Interval:   r.interval,
+		Timeout:    r.timeout,
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
new file mode 100644
index 0000000000000000000000000000000000000000..da39ab961c1b3ef3a6d07f2820e82bf60635c369
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
@@ -0,0 +1,658 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"container/list"
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/embedded"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/metric/internal"
+	"go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+	"go.opentelemetry.io/otel/sdk/metric/internal/x"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+	"go.opentelemetry.io/otel/sdk/resource"
+)
+
+var (
+	errCreatingAggregators     = errors.New("could not create all aggregators")
+	errIncompatibleAggregation = errors.New("incompatible aggregation")
+	errUnknownAggregation      = errors.New("unrecognized aggregation")
+)
+
+// instrumentSync is a synchronization point between a pipeline and an
+// instrument's aggregate function.
+type instrumentSync struct {
+	name        string
+	description string
+	unit        string
+	compAgg     aggregate.ComputeAggregation
+}
+
+func newPipeline(res *resource.Resource, reader Reader, views []View) *pipeline {
+	if res == nil {
+		res = resource.Empty()
+	}
+	return &pipeline{
+		resource: res,
+		reader:   reader,
+		views:    views,
+		// aggregations is lazily allocated when needed.
+	}
+}
+
+// pipeline connects all of the instruments created by a meter provider to a Reader.
+// This is the object that will be registered via `Reader.register()` when a
+// meter provider is created.
+//
+// As instruments are created, each should be checked against the views of the
+// Reader, and if a view matches, the corresponding aggregate function should
+// be added to the pipeline.
+type pipeline struct {
+	resource *resource.Resource
+
+	reader Reader
+	views  []View
+
+	sync.Mutex
+	aggregations   map[instrumentation.Scope][]instrumentSync
+	callbacks      []func(context.Context) error
+	multiCallbacks list.List
+}
+
+// addSync adds the instrumentSync to pipeline p with scope. This method is not
+// idempotent. Duplicate calls will result in duplicate additions; it is the
+// caller's responsibility to ensure this is called with unique values.
+func (p *pipeline) addSync(scope instrumentation.Scope, iSync instrumentSync) {
+	p.Lock()
+	defer p.Unlock()
+	if p.aggregations == nil {
+		p.aggregations = map[instrumentation.Scope][]instrumentSync{
+			scope: {iSync},
+		}
+		return
+	}
+	p.aggregations[scope] = append(p.aggregations[scope], iSync)
+}
+
+type multiCallback func(context.Context) error
+
+// addMultiCallback registers a multi-instrument callback to be run when
+// `produce()` is called.
+func (p *pipeline) addMultiCallback(c multiCallback) (unregister func()) {
+	p.Lock()
+	defer p.Unlock()
+	e := p.multiCallbacks.PushBack(c)
+	return func() {
+		p.Lock()
+		p.multiCallbacks.Remove(e)
+		p.Unlock()
+	}
+}
+
+// produce returns aggregated metrics from a single collection.
+//
+// This method is safe to call concurrently.
+func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+	p.Lock()
+	defer p.Unlock()
+
+	var errs multierror
+	for _, c := range p.callbacks {
+		// TODO make the callbacks parallel. ( #3034 )
+		if err := c(ctx); err != nil {
+			errs.append(err)
+		}
+		if err := ctx.Err(); err != nil {
+			rm.Resource = nil
+			rm.ScopeMetrics = rm.ScopeMetrics[:0]
+			return err
+		}
+	}
+	for e := p.multiCallbacks.Front(); e != nil; e = e.Next() {
+		// TODO make the callbacks parallel. ( #3034 )
+		f := e.Value.(multiCallback)
+		if err := f(ctx); err != nil {
+			errs.append(err)
+		}
+		if err := ctx.Err(); err != nil {
+			// This means the context expired before we finished running callbacks.
+			rm.Resource = nil
+			rm.ScopeMetrics = rm.ScopeMetrics[:0]
+			return err
+		}
+	}
+
+	rm.Resource = p.resource
+	rm.ScopeMetrics = internal.ReuseSlice(rm.ScopeMetrics, len(p.aggregations))
+
+	i := 0
+	for scope, instruments := range p.aggregations {
+		rm.ScopeMetrics[i].Metrics = internal.ReuseSlice(rm.ScopeMetrics[i].Metrics, len(instruments))
+		j := 0
+		for _, inst := range instruments {
+			data := rm.ScopeMetrics[i].Metrics[j].Data
+			if n := inst.compAgg(&data); n > 0 {
+				rm.ScopeMetrics[i].Metrics[j].Name = inst.name
+				rm.ScopeMetrics[i].Metrics[j].Description = inst.description
+				rm.ScopeMetrics[i].Metrics[j].Unit = inst.unit
+				rm.ScopeMetrics[i].Metrics[j].Data = data
+				j++
+			}
+		}
+		rm.ScopeMetrics[i].Metrics = rm.ScopeMetrics[i].Metrics[:j]
+		if len(rm.ScopeMetrics[i].Metrics) > 0 {
+			rm.ScopeMetrics[i].Scope = scope
+			i++
+		}
+	}
+
+	rm.ScopeMetrics = rm.ScopeMetrics[:i]
+
+	return errs.errorOrNil()
+}
+
+// inserter facilitates inserting new instruments from a single scope into a
+// pipeline.
+type inserter[N int64 | float64] struct {
+	// aggregators is a cache that holds aggregate function inputs whose
+	// outputs have been inserted into the underlying reader pipeline. This
+	// cache ensures no duplicate aggregate functions are inserted into the
+	// reader pipeline and if a new request during an instrument creation asks
+	// for the same aggregate function input the same instance is returned.
+	aggregators *cache[instID, aggVal[N]]
+
+	// views is a cache that holds instrument identifiers for all the
+	// instruments a Meter has created; it is provided by the Meter that owns
+	// this inserter. This cache ensures that, during the creation of
+	// instruments with the same name but different options (e.g. description,
+	// unit), a warning message is logged.
+	views *cache[string, instID]
+
+	pipeline *pipeline
+}
+
+func newInserter[N int64 | float64](p *pipeline, vc *cache[string, instID]) *inserter[N] {
+	if vc == nil {
+		vc = &cache[string, instID]{}
+	}
+	return &inserter[N]{
+		aggregators: &cache[instID, aggVal[N]]{},
+		views:       vc,
+		pipeline:    p,
+	}
+}
+
+// Instrument inserts the instrument inst into a pipeline. All
+// views the pipeline contains are matched against, and any matching view that
+// creates a unique aggregate function will have its output inserted into the
+// pipeline and its input included in the returned slice.
+//
+// The returned aggregate function inputs are ensured to be deduplicated and
+// unique. If another view in another pipeline that is cached by this
+// inserter's cache has already inserted the same aggregate function for the
+// same instrument, that function's input instance is returned.
+//
+// If another instrument has already been inserted by this inserter, or any
+// other using the same cache, and it conflicts with the instrument being
+// inserted in this call, an aggregate function input matching the arguments
+// will still be returned but an Info level log message will also be logged to
+// the OTel global logger.
+//
+// If the passed instrument would result in an incompatible aggregate function,
+// an error is returned and that aggregate function output is not inserted nor
+// is its input returned.
+//
+// If an instrument is determined to use a Drop aggregation, that instrument is
+// not inserted nor returned.
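+//
+// For reference, a view that renames an instrument could be registered on the
+// MeterProvider as follows (a minimal sketch using the public API; the names
+// are illustrative):
+//
+//	view := metric.NewView(
+//		metric.Instrument{Name: "latency"},
+//		metric.Stream{Name: "request.latency"},
+//	)
+//	provider := metric.NewMeterProvider(metric.WithView(view))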
+func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) ([]aggregate.Measure[N], error) {
+	var (
+		matched  bool
+		measures []aggregate.Measure[N]
+	)
+
+	errs := &multierror{wrapped: errCreatingAggregators}
+	seen := make(map[uint64]struct{})
+	for _, v := range i.pipeline.views {
+		stream, match := v(inst)
+		if !match {
+			continue
+		}
+		matched = true
+		in, id, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation)
+		if err != nil {
+			errs.append(err)
+		}
+		if in == nil { // Drop aggregation.
+			continue
+		}
+		if _, ok := seen[id]; ok {
+			// This aggregate function has already been added.
+			continue
+		}
+		seen[id] = struct{}{}
+		measures = append(measures, in)
+	}
+
+	if matched {
+		return measures, errs.errorOrNil()
+	}
+
+	// Apply the implicit default view if no explicit view matched.
+	stream := Stream{
+		Name:        inst.Name,
+		Description: inst.Description,
+		Unit:        inst.Unit,
+	}
+	in, _, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation)
+	if err != nil {
+		errs.append(err)
+	}
+	if in != nil {
+		// Guaranteed not to have been seen, given matched was false.
+		measures = append(measures, in)
+	}
+	return measures, errs.errorOrNil()
+}
+
+// addCallback registers a single instrument callback to be run when
+// `produce()` is called.
+func (i *inserter[N]) addCallback(cback func(context.Context) error) {
+	i.pipeline.Lock()
+	defer i.pipeline.Unlock()
+	i.pipeline.callbacks = append(i.pipeline.callbacks, cback)
+}
+
+var aggIDCount uint64
+
+// aggVal is the cached value in an aggregators cache.
+type aggVal[N int64 | float64] struct {
+	ID      uint64
+	Measure aggregate.Measure[N]
+	Err     error
+}
+
+// readerDefaultAggregation returns the default aggregation for the instrument
+// kind based on the reader's aggregation preferences. This is used unless the
+// aggregation is overridden with a view.
+func (i *inserter[N]) readerDefaultAggregation(kind InstrumentKind) Aggregation {
+	aggregation := i.pipeline.reader.aggregation(kind)
+	switch aggregation.(type) {
+	case nil, AggregationDefault:
+		// If the reader returns default or nil use the default selector.
+		aggregation = DefaultAggregationSelector(kind)
+	default:
+		// Deep copy and validate before using.
+		aggregation = aggregation.copy()
+		if err := aggregation.err(); err != nil {
+			orig := aggregation
+			aggregation = DefaultAggregationSelector(kind)
+			global.Error(
+				err, "using default aggregation instead",
+				"aggregation", orig,
+				"replacement", aggregation,
+			)
+		}
+	}
+	return aggregation
+}
+
+// cachedAggregator returns the appropriate aggregate input and output
+// functions for an instrument configuration. If the exact instrument has been
+// created within the inst.Scope, those aggregate function instances will be
+// returned. Otherwise, new computed aggregate functions will be cached and
+// returned.
+//
+// If the instrument configuration conflicts with an instrument that has
+// already been created (e.g. description, unit, data type) a warning will be
+// logged at the "Info" level with the global OTel logger. Valid new aggregate
+// functions for the instrument configuration will still be returned without an
+// error.
+//
+// If the instrument defines an unknown or incompatible aggregation, an error
+// is returned.
+func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind InstrumentKind, stream Stream, readerAggregation Aggregation) (meas aggregate.Measure[N], aggID uint64, err error) {
+	switch stream.Aggregation.(type) {
+	case nil:
+		// The aggregation was not overridden with a view. Use the aggregation
+		// provided by the reader.
+		stream.Aggregation = readerAggregation
+	case AggregationDefault:
+		// The view explicitly requested the default aggregation.
+		stream.Aggregation = DefaultAggregationSelector(kind)
+	}
+
+	if err := isAggregatorCompatible(kind, stream.Aggregation); err != nil {
+		return nil, 0, fmt.Errorf(
+			"creating aggregator with instrumentKind: %d, aggregation %v: %w",
+			kind, stream.Aggregation, err,
+		)
+	}
+
+	id := i.instID(kind, stream)
+	// If there is a conflict, the specification says the view should
+	// still be applied and a warning should be logged.
+	i.logConflict(id)
+
+	// If there are requests for the same instrument with different name
+	// casing, the first-seen needs to be returned. Use a normalized ID for the
+	// cache lookup to ensure the correct comparison.
+	normID := id.normalize()
+	cv := i.aggregators.Lookup(normID, func() aggVal[N] {
+		b := aggregate.Builder[N]{
+			Temporality:   i.pipeline.reader.temporality(kind),
+			ReservoirFunc: reservoirFunc[N](stream.Aggregation),
+		}
+		b.Filter = stream.AttributeFilter
+		// A value less than or equal to zero will disable the aggregation
+		// limits for the builder (and all the created aggregates).
+		// CardinalityLimit.Lookup returns 0 by default if unset (or
+		// unrecognized input). Use that value directly.
+		b.AggregationLimit, _ = x.CardinalityLimit.Lookup()
+
+		in, out, err := i.aggregateFunc(b, stream.Aggregation, kind)
+		if err != nil {
+			return aggVal[N]{0, nil, err}
+		}
+		if in == nil { // Drop aggregator.
+			return aggVal[N]{0, nil, nil}
+		}
+		i.pipeline.addSync(scope, instrumentSync{
+			// Use the first-seen name casing for this and all subsequent
+			// requests of this instrument.
+			name:        stream.Name,
+			description: stream.Description,
+			unit:        stream.Unit,
+			compAgg:     out,
+		})
+		id := atomic.AddUint64(&aggIDCount, 1)
+		return aggVal[N]{id, in, err}
+	})
+	return cv.Measure, cv.ID, cv.Err
+}
+
+// logConflict checks whether an instrument with the same case-insensitive name
+// as id has already been created. If that instrument conflicts with id, a
+// warning is logged.
+func (i *inserter[N]) logConflict(id instID) {
+	// The API specification defines names as case-insensitive. If there is a
+	// different casing of a name, it needs to be treated as a conflict.
+	name := id.normalize().Name
+	existing := i.views.Lookup(name, func() instID { return id })
+	if id == existing {
+		return
+	}
+
+	const msg = "duplicate metric stream definitions"
+	args := []interface{}{
+		"names", fmt.Sprintf("%q, %q", existing.Name, id.Name),
+		"descriptions", fmt.Sprintf("%q, %q", existing.Description, id.Description),
+		"kinds", fmt.Sprintf("%s, %s", existing.Kind, id.Kind),
+		"units", fmt.Sprintf("%s, %s", existing.Unit, id.Unit),
+		"numbers", fmt.Sprintf("%s, %s", existing.Number, id.Number),
+	}
+
+	// The specification recommends logging a suggested view to resolve
+	// conflicts if possible.
+	//
+	// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#duplicate-instrument-registration
+	if id.Unit != existing.Unit || id.Number != existing.Number {
+		// There is no view resolution for these, don't make a suggestion.
+		global.Warn(msg, args...)
+		return
+	}
+
+	var stream string
+	if id.Name != existing.Name || id.Kind != existing.Kind {
+		stream = `Stream{Name: "{{NEW_NAME}}"}`
+	} else if id.Description != existing.Description {
+		stream = fmt.Sprintf("Stream{Description: %q}", existing.Description)
+	}
+
+	inst := fmt.Sprintf(
+		"Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}",
+		id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit,
+	)
+	args = append(args, "suggested.view", fmt.Sprintf("NewView(%s, %s)", inst, stream))
+
+	global.Warn(msg, args...)
+}
+
+func (i *inserter[N]) instID(kind InstrumentKind, stream Stream) instID {
+	var zero N
+	return instID{
+		Name:        stream.Name,
+		Description: stream.Description,
+		Unit:        stream.Unit,
+		Kind:        kind,
+		Number:      fmt.Sprintf("%T", zero),
+	}
+}
+
+// aggregateFunc returns new aggregate functions matching agg and kind. If
+// the agg is unknown or the temporality is invalid, an error is returned.
+func (i *inserter[N]) aggregateFunc(b aggregate.Builder[N], agg Aggregation, kind InstrumentKind) (meas aggregate.Measure[N], comp aggregate.ComputeAggregation, err error) {
+	switch a := agg.(type) {
+	case AggregationDefault:
+		return i.aggregateFunc(b, DefaultAggregationSelector(kind), kind)
+	case AggregationDrop:
+		// Return nil in and out to signify the drop aggregator.
+	case AggregationLastValue:
+		meas, comp = b.LastValue()
+	case AggregationSum:
+		switch kind {
+		case InstrumentKindObservableCounter:
+			meas, comp = b.PrecomputedSum(true)
+		case InstrumentKindObservableUpDownCounter:
+			meas, comp = b.PrecomputedSum(false)
+		case InstrumentKindCounter, InstrumentKindHistogram:
+			meas, comp = b.Sum(true)
+		default:
+			// InstrumentKindUpDownCounter, InstrumentKindObservableGauge, and
+			// instrumentKindUndefined or other invalid instrument kinds.
+			meas, comp = b.Sum(false)
+		}
+	case AggregationExplicitBucketHistogram:
+		var noSum bool
+		switch kind {
+		case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge:
+			// The sum should not be collected for any instrument that can make
+			// negative measurements:
+			// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations
+			noSum = true
+		}
+		meas, comp = b.ExplicitBucketHistogram(a.Boundaries, a.NoMinMax, noSum)
+	case AggregationBase2ExponentialHistogram:
+		var noSum bool
+		switch kind {
+		case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge:
+			// The sum should not be collected for any instrument that can make
+			// negative measurements:
+			// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations
+			noSum = true
+		}
+		meas, comp = b.ExponentialBucketHistogram(a.MaxSize, a.MaxScale, a.NoMinMax, noSum)
+
+	default:
+		err = errUnknownAggregation
+	}
+
+	return meas, comp, err
+}
+
+// isAggregatorCompatible checks if the aggregation can be used by the instrument.
+// Current compatibility:
+//
+// | Instrument Kind          | Drop | LastValue | Sum | Histogram | Exponential Histogram |
+// |--------------------------|------|-----------|-----|-----------|-----------------------|
+// | Counter                  | ✓    |           | ✓   | ✓         | ✓                     |
+// | UpDownCounter            | ✓    |           | ✓   | ✓         | ✓                     |
+// | Histogram                | ✓    |           | ✓   | ✓         | ✓                     |
+// | Observable Counter       | ✓    |           | ✓   | ✓         | ✓                     |
+// | Observable UpDownCounter | ✓    |           | ✓   | ✓         | ✓                     |
+// | Observable Gauge         | ✓    | ✓         |     | ✓         | ✓                     |.
+func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error {
+	switch agg.(type) {
+	case AggregationDefault:
+		return nil
+	case AggregationExplicitBucketHistogram, AggregationBase2ExponentialHistogram:
+		switch kind {
+		case InstrumentKindCounter,
+			InstrumentKindUpDownCounter,
+			InstrumentKindHistogram,
+			InstrumentKindObservableCounter,
+			InstrumentKindObservableUpDownCounter,
+			InstrumentKindObservableGauge:
+			return nil
+		default:
+			return errIncompatibleAggregation
+		}
+	case AggregationSum:
+		switch kind {
+		case InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter, InstrumentKindCounter, InstrumentKindHistogram, InstrumentKindUpDownCounter:
+			return nil
+		default:
+			// TODO: review need for aggregation check after
+			// https://github.com/open-telemetry/opentelemetry-specification/issues/2710
+			return errIncompatibleAggregation
+		}
+	case AggregationLastValue:
+		if kind == InstrumentKindObservableGauge {
+			return nil
+		}
+		// TODO: review need for aggregation check after
+		// https://github.com/open-telemetry/opentelemetry-specification/issues/2710
+		return errIncompatibleAggregation
+	case AggregationDrop:
+		return nil
+	default:
+		// This is reached after checking for default; it should be an error at this point.
+		return fmt.Errorf("%w: %v", errUnknownAggregation, agg)
+	}
+}
+
+// pipelines is the group of pipelines connecting Readers with instrument
+// measurement.
+type pipelines []*pipeline
+
+func newPipelines(res *resource.Resource, readers []Reader, views []View) pipelines {
+	pipes := make([]*pipeline, 0, len(readers))
+	for _, r := range readers {
+		p := newPipeline(res, r, views)
+		r.register(p)
+		pipes = append(pipes, p)
+	}
+	return pipes
+}
+
+func (p pipelines) registerMultiCallback(c multiCallback) metric.Registration {
+	unregs := make([]func(), len(p))
+	for i, pipe := range p {
+		unregs[i] = pipe.addMultiCallback(c)
+	}
+	return unregisterFuncs{f: unregs}
+}
+
+type unregisterFuncs struct {
+	embedded.Registration
+	f []func()
+}
+
+func (u unregisterFuncs) Unregister() error {
+	for _, f := range u.f {
+		f()
+	}
+	return nil
+}
+
+// resolver facilitates resolving the aggregate functions an instrument uses
+// to aggregate measurements, while updating all pipelines that need to pull
+// from those aggregations.
+type resolver[N int64 | float64] struct {
+	inserters []*inserter[N]
+}
+
+func newResolver[N int64 | float64](p pipelines, vc *cache[string, instID]) resolver[N] {
+	in := make([]*inserter[N], len(p))
+	for i := range in {
+		in[i] = newInserter[N](p[i], vc)
+	}
+	return resolver[N]{in}
+}
+
+// Aggregators returns the Aggregators that must be updated by the instrument
+// defined by id.
+func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) {
+	var measures []aggregate.Measure[N]
+
+	errs := &multierror{}
+	for _, i := range r.inserters {
+		in, err := i.Instrument(id, i.readerDefaultAggregation(id.Kind))
+		if err != nil {
+			errs.append(err)
+		}
+		measures = append(measures, in...)
+	}
+	return measures, errs.errorOrNil()
+}
+
+// HistogramAggregators returns the histogram Aggregators that must be updated by the instrument
+// defined by id. If boundaries were provided on instrument instantiation, those take precedence
+// over boundaries provided by the reader.
+func (r resolver[N]) HistogramAggregators(id Instrument, boundaries []float64) ([]aggregate.Measure[N], error) {
+	var measures []aggregate.Measure[N]
+
+	errs := &multierror{}
+	for _, i := range r.inserters {
+		agg := i.readerDefaultAggregation(id.Kind)
+		if histAgg, ok := agg.(AggregationExplicitBucketHistogram); ok && len(boundaries) > 0 {
+			histAgg.Boundaries = boundaries
+			agg = histAgg
+		}
+		in, err := i.Instrument(id, agg)
+		if err != nil {
+			errs.append(err)
+		}
+		measures = append(measures, in...)
+	}
+	return measures, errs.errorOrNil()
+}
+
+type multierror struct {
+	wrapped error
+	errors  []string
+}
+
+func (m *multierror) errorOrNil() error {
+	if len(m.errors) == 0 {
+		return nil
+	}
+	if m.wrapped == nil {
+		return errors.New(strings.Join(m.errors, "; "))
+	}
+	return fmt.Errorf("%w: %s", m.wrapped, strings.Join(m.errors, "; "))
+}
+
+func (m *multierror) append(err error) {
+	m.errors = append(m.errors, err.Error())
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go
new file mode 100644
index 0000000000000000000000000000000000000000..7d1a9183ce12676e8016518b79d10549fa483280
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go
@@ -0,0 +1,154 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"context"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/embedded"
+	"go.opentelemetry.io/otel/metric/noop"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+)
+
+// MeterProvider handles the creation and coordination of Meters. All Meters
+// created by a MeterProvider will be associated with the same Resource, have
+// the same Views applied to them, and have their produced metric telemetry
+// passed to the configured Readers.
+type MeterProvider struct {
+	embedded.MeterProvider
+
+	pipes  pipelines
+	meters cache[instrumentation.Scope, *meter]
+
+	forceFlush, shutdown func(context.Context) error
+	stopped              atomic.Bool
+}
+
+// Compile-time check MeterProvider implements metric.MeterProvider.
+var _ metric.MeterProvider = (*MeterProvider)(nil)
+
+// NewMeterProvider returns a new and configured MeterProvider.
+//
+// By default, the returned MeterProvider is configured with the default
+// Resource and no Readers. Readers cannot be added after a MeterProvider is
+// created. This means the returned MeterProvider, one created with no
+// Readers, will perform no operations.
+func NewMeterProvider(options ...Option) *MeterProvider {
+	conf := newConfig(options)
+	flush, sdown := conf.readerSignals()
+
+	mp := &MeterProvider{
+		pipes:      newPipelines(conf.res, conf.readers, conf.views),
+		forceFlush: flush,
+		shutdown:   sdown,
+	}
+	// Log after creation so all readers show correctly they are registered.
+	global.Info("MeterProvider created",
+		"Resource", conf.res,
+		"Readers", conf.readers,
+		"Views", len(conf.views),
+	)
+	return mp
+}
+
+// Meter returns a Meter with the given name and configured with options.
+//
+// The name should be the name of the instrumentation scope creating
+// telemetry. This name may be the same as the instrumented code only if that
+// code provides built-in instrumentation.
+//
+// Calls to the Meter method after Shutdown has been called will return Meters
+// that perform no operations.
+//
+// This method is safe to call concurrently.
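+//
+// A minimal usage sketch (the scope name is illustrative):
+//
+//	meter := mp.Meter("github.com/example/instrumented-lib")
+//	counter, err := meter.Int64Counter("requests.total")
+//	if err == nil {
+//		counter.Add(ctx, 1)
+//	}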
+func (mp *MeterProvider) Meter(name string, options ...metric.MeterOption) metric.Meter {
+	if name == "" {
+		global.Warn("Invalid Meter name.", "name", name)
+	}
+
+	if mp.stopped.Load() {
+		return noop.Meter{}
+	}
+
+	c := metric.NewMeterConfig(options...)
+	s := instrumentation.Scope{
+		Name:      name,
+		Version:   c.InstrumentationVersion(),
+		SchemaURL: c.SchemaURL(),
+	}
+
+	global.Info("Meter created",
+		"Name", s.Name,
+		"Version", s.Version,
+		"SchemaURL", s.SchemaURL,
+	)
+
+	return mp.meters.Lookup(s, func() *meter {
+		return newMeter(s, mp.pipes)
+	})
+}
+
+// ForceFlush flushes all pending telemetry.
+//
+// This method honors the deadline or cancellation of ctx. An appropriate
+// error will be returned in these situations. There is no guarantee that all
+// telemetry will be flushed or all resources have been released in these
+// situations.
+//
+// ForceFlush calls ForceFlush(context.Context) error
+// on all Readers that implement this method.
+//
+// This method is safe to call concurrently.
+func (mp *MeterProvider) ForceFlush(ctx context.Context) error {
+	if mp.forceFlush != nil {
+		return mp.forceFlush(ctx)
+	}
+	return nil
+}
+
+// Shutdown shuts down the MeterProvider flushing all pending telemetry and
+// releasing any held computational resources.
+//
+// This call is idempotent. The first call will perform all flush and
+// releasing operations. Subsequent calls will perform no action and will
+// return an error stating this.
+//
+// Measurements made by instruments from meters this MeterProvider created
+// will not be exported after Shutdown is called.
+//
+// This method honors the deadline or cancellation of ctx. An appropriate
+// error will be returned in these situations. There is no guarantee that all
+// telemetry will be flushed or all resources have been released in these
+// situations.
+//
+// This method is safe to call concurrently.
+func (mp *MeterProvider) Shutdown(ctx context.Context) error {
+	// Even though it may seem like there is a synchronization issue between the
+	// call to `Store` and checking `shutdown`, the Go concurrency model ensures
+	// that is not the case, as all the atomic operations executed in a program
+	// behave as though executed in some sequentially consistent order. This
+	// definition provides the same semantics as C++'s sequentially consistent
+	// atomics and Java's volatile variables.
+	// See https://go.dev/ref/mem#atomic and https://pkg.go.dev/sync/atomic.
+
+	mp.stopped.Store(true)
+	if mp.shutdown != nil {
+		return mp.shutdown(ctx)
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
new file mode 100644
index 0000000000000000000000000000000000000000..65cedaf3c075232394f91ceb3ae2031c5afa2130
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
@@ -0,0 +1,200 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// errDuplicateRegister is logged by a Reader when an attempt to register it
+// more than once occurs.
+var errDuplicateRegister = fmt.Errorf("duplicate reader registration")
+
+// ErrReaderNotRegistered is returned if Collect or Shutdown are called before
+// the reader is registered with a MeterProvider.
+var ErrReaderNotRegistered = fmt.Errorf("reader is not registered")
+
+// ErrReaderShutdown is returned if Collect or Shutdown are called after a
+// reader has been Shutdown once.
+var ErrReaderShutdown = fmt.Errorf("reader is shutdown")
+
+// errNonPositiveDuration is logged when an environment variable
+// has a non-positive value.
+var errNonPositiveDuration = fmt.Errorf("non-positive duration")
+
+// Reader is the interface used between the SDK and an
+// exporter.  Control flow is bi-directional through the
+// Reader, since the SDK initiates ForceFlush and Shutdown
+// while the exporter initiates collection.  The Register() method here
+// informs the Reader that it can begin reading, signaling the
+// start of bi-directional control flow.
+//
+// Typically, push-based exporters that are periodic will
+// implement PeriodicExporter themselves and construct a
+// PeriodicReader to satisfy this interface.
+//
+// Pull-based exporters will typically implement Register
+// themselves, since they read on demand.
+//
+// Warning: methods may be added to this interface in minor releases.
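+//
+// A typical wiring sketch (illustrative; NewManualReader and WithReader are
+// defined elsewhere in this package):
+//
+//	reader := NewManualReader()
+//	mp := NewMeterProvider(WithReader(reader))
+//	// ... record measurements via meters from mp ...
+//	var rm metricdata.ResourceMetrics
+//	err := reader.Collect(context.Background(), &rm)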
+type Reader interface {
+	// register registers a Reader with a MeterProvider.
+	// The producer argument allows the Reader to signal the SDK to collect
+	// and send aggregated metric measurements.
+	register(sdkProducer)
+
+	// temporality reports the Temporality for the instrument kind provided.
+	//
+	// This method needs to be concurrent safe with itself and all the other
+	// Reader methods.
+	temporality(InstrumentKind) metricdata.Temporality
+
+	// aggregation returns what Aggregation to use for an instrument kind.
+	//
+	// This method needs to be concurrent safe with itself and all the other
+	// Reader methods.
+	aggregation(InstrumentKind) Aggregation // nolint:revive  // import-shadow for method scoped by type.
+
+	// Collect gathers and returns all metric data related to the Reader from
+	// the SDK and stores it in rm. An error is returned if this is called
+	// after Shutdown or if rm is nil.
+	//
+	// This method needs to be concurrent safe, and the cancellation of the
+	// passed context is expected to be honored.
+	Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Shutdown flushes all metric measurements held in an export pipeline and releases any
+	// held computational resources.
+	//
+	// The deadline or cancellation of the passed context is honored. An appropriate
+	// error will be returned in these situations. There is no guarantee that all
+	// telemetry will be flushed or all resources will be released in these
+	// situations.
+	//
+	// After Shutdown is called, calls to Collect will perform no operation and instead will return
+	// an error indicating the shutdown state.
+	//
+	// This method needs to be concurrent safe.
+	Shutdown(context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// sdkProducer produces metrics for a Reader.
+type sdkProducer interface {
+	// produce returns aggregated metrics from a single collection.
+	//
+	// This method is safe to call concurrently.
+	produce(context.Context, *metricdata.ResourceMetrics) error
+}
+
+// Producer produces metrics for a Reader from an external source.
+type Producer interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Produce returns aggregated metrics from an external source.
+	//
+	// This method should be safe to call concurrently.
+	Produce(context.Context) ([]metricdata.ScopeMetrics, error)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// produceHolder is used as an atomic.Value to wrap the non-concrete producer
+// type.
+type produceHolder struct {
+	produce func(context.Context, *metricdata.ResourceMetrics) error
+}
+
+// shutdownProducer always produces an ErrReaderShutdown error.
+type shutdownProducer struct{}
+
+// produce returns an ErrReaderShutdown error.
+func (p shutdownProducer) produce(context.Context, *metricdata.ResourceMetrics) error {
+	return ErrReaderShutdown
+}
+
+// TemporalitySelector selects the temporality to use based on the InstrumentKind.
+type TemporalitySelector func(InstrumentKind) metricdata.Temporality
+
+// DefaultTemporalitySelector is the default TemporalitySelector used if
+// WithTemporalitySelector is not provided. CumulativeTemporality will be used
+// for all instrument kinds if this TemporalitySelector is used.
+func DefaultTemporalitySelector(InstrumentKind) metricdata.Temporality {
+	return metricdata.CumulativeTemporality
+}
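+
+// A custom TemporalitySelector that exports deltas for counters and
+// histograms might look like this (illustrative sketch, not upstream code):
+//
+//	func deltaSelector(ik InstrumentKind) metricdata.Temporality {
+//		switch ik {
+//		case InstrumentKindCounter, InstrumentKindHistogram:
+//			return metricdata.DeltaTemporality
+//		}
+//		return metricdata.CumulativeTemporality
+//	}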
+
+// AggregationSelector selects the aggregation and the parameters to use for
+// that aggregation based on the InstrumentKind.
+//
+// If the Aggregation returned is nil or DefaultAggregation, the selection from
+// DefaultAggregationSelector will be used.
+type AggregationSelector func(InstrumentKind) Aggregation
+
+// DefaultAggregationSelector returns the default aggregation and parameters
+// that will be used to summarize measurements made from an instrument of
+// InstrumentKind. This AggregationSelector uses the following selection
+// mapping: Counter ⇨ Sum, Observable Counter ⇨ Sum, UpDownCounter ⇨ Sum,
+// Observable UpDownCounter ⇨ Sum, Observable Gauge ⇨ LastValue,
+// Histogram ⇨ ExplicitBucketHistogram.
+func DefaultAggregationSelector(ik InstrumentKind) Aggregation {
+	switch ik {
+	case InstrumentKindCounter, InstrumentKindUpDownCounter, InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter:
+		return AggregationSum{}
+	case InstrumentKindObservableGauge:
+		return AggregationLastValue{}
+	case InstrumentKindHistogram:
+		return AggregationExplicitBucketHistogram{
+			Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+			NoMinMax:   false,
+		}
+	}
+	panic("unknown instrument kind")
+}
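+
+// A custom AggregationSelector that overrides only the histogram boundaries
+// and defers to the default otherwise might look like this (illustrative
+// sketch, not upstream code):
+//
+//	func smallBuckets(ik InstrumentKind) Aggregation {
+//		if ik == InstrumentKindHistogram {
+//			return AggregationExplicitBucketHistogram{
+//				Boundaries: []float64{0, 1, 5, 10, 50, 100},
+//			}
+//		}
+//		return DefaultAggregationSelector(ik)
+//	}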
+
+// ReaderOption is an option which can be applied to manual or Periodic
+// readers.
+type ReaderOption interface {
+	PeriodicReaderOption
+	ManualReaderOption
+}
+
+// WithProducer registers p as an external Producer of metric data
+// for this Reader.
+func WithProducer(p Producer) ReaderOption {
+	return producerOption{p: p}
+}
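+
+// For example (illustrative; myBridge is a hypothetical Producer
+// implementation):
+//
+//	reader := NewManualReader(WithProducer(myBridge))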
+
+type producerOption struct {
+	p Producer
+}
+
+// applyManual returns a manualReaderConfig with option applied.
+func (o producerOption) applyManual(c manualReaderConfig) manualReaderConfig {
+	c.producers = append(c.producers, o.p)
+	return c
+}
+
+// applyPeriodic returns a periodicReaderConfig with option applied.
+func (o producerOption) applyPeriodic(c periodicReaderConfig) periodicReaderConfig {
+	c.producers = append(c.producers, o.p)
+	return c
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
new file mode 100644
index 0000000000000000000000000000000000000000..7433bf7d410fc712aa956df1af627e52a8210187
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+// version is the current release version of the metric SDK in use.
+func version() string {
+	return "1.23.1"
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/view.go b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go
new file mode 100644
index 0000000000000000000000000000000000000000..65f243befed895ed2ae128de1f9a5c567c2dba45
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go
@@ -0,0 +1,128 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"errors"
+	"regexp"
+	"strings"
+
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+var (
+	errMultiInst = errors.New("name replacement for multiple instruments")
+	errEmptyView = errors.New("no criteria provided for view")
+
+	emptyView = func(Instrument) (Stream, bool) { return Stream{}, false }
+)
+
+// View is an override to the default behavior of the SDK. It defines how data
+// should be collected for certain instruments. When a View matches an
+// Instrument, it returns the exact Stream to use for that Instrument and
+// true. Otherwise, false is returned.
+type View func(Instrument) (Stream, bool)
+
+// NewView returns a View that applies the Stream mask for all instruments that
+// match criteria. The returned View will only apply mask if all non-zero-value
+// fields of criteria match the corresponding Instrument passed to the view. If
+// no criteria are provided (all fields of criteria are their zero-values), a
+// view that matches no instruments is returned. If you need to match a
+// zero-value field, create a View directly.
+//
+// The Name field of criteria supports wildcard pattern matching. The "*"
+// wildcard is recognized as matching zero or more characters, and "?" is
+// recognized as matching exactly one character. For example, a pattern of "*"
+// matches all instrument names.
+//
+// The Stream mask only applies updates for non-zero-value fields. By default,
+// the Instrument the View matches against will be used for the Name,
+// Description, and Unit of the returned Stream, and no Aggregation or
+// AttributeFilter is set. All non-zero-value fields of mask are used instead
+// of the default. If you need to zero out a Stream field returned from a
+// View, create a View directly.
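+//
+// For example (illustrative sketch), a view that re-buckets every histogram
+// named "request.latency":
+//
+//	view := NewView(
+//		Instrument{Name: "request.latency"},
+//		Stream{Aggregation: AggregationExplicitBucketHistogram{
+//			Boundaries: []float64{0, 10, 100, 1000},
+//		}},
+//	)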
+func NewView(criteria Instrument, mask Stream) View {
+	if criteria.empty() {
+		global.Error(
+			errEmptyView, "dropping view",
+			"mask", mask,
+		)
+		return emptyView
+	}
+
+	var matchFunc func(Instrument) bool
+	if strings.ContainsAny(criteria.Name, "*?") {
+		if mask.Name != "" {
+			global.Error(
+				errMultiInst, "dropping view",
+				"criteria", criteria,
+				"mask", mask,
+			)
+			return emptyView
+		}
+
+		// Handle branching here in NewView instead of criteria.matches so
+		// criteria.matches remains inlinable for the simple case.
+		pattern := regexp.QuoteMeta(criteria.Name)
+		pattern = "^" + pattern + "$"
+		pattern = strings.ReplaceAll(pattern, `\?`, ".")
+		pattern = strings.ReplaceAll(pattern, `\*`, ".*")
+		re := regexp.MustCompile(pattern)
+		matchFunc = func(i Instrument) bool {
+			return re.MatchString(i.Name) &&
+				criteria.matchesDescription(i) &&
+				criteria.matchesKind(i) &&
+				criteria.matchesUnit(i) &&
+				criteria.matchesScope(i)
+		}
+	} else {
+		matchFunc = criteria.matches
+	}
+
+	var agg Aggregation
+	if mask.Aggregation != nil {
+		agg = mask.Aggregation.copy()
+		if err := agg.err(); err != nil {
+			global.Error(
+				err, "not using aggregation with view",
+				"criteria", criteria,
+				"mask", mask,
+			)
+			agg = nil
+		}
+	}
+
+	return func(i Instrument) (Stream, bool) {
+		if matchFunc(i) {
+			return Stream{
+				Name:            nonZero(mask.Name, i.Name),
+				Description:     nonZero(mask.Description, i.Description),
+				Unit:            nonZero(mask.Unit, i.Unit),
+				Aggregation:     agg,
+				AttributeFilter: mask.AttributeFilter,
+			}, true
+		}
+		return Stream{}, false
+	}
+}
+
+// nonZero returns v if it is non-zero-valued, otherwise alt.
+func nonZero[T comparable](v, alt T) T {
+	var zero T
+	if v != zero {
+		return v
+	}
+	return alt
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
new file mode 100644
index 0000000000000000000000000000000000000000..aed756c5e7005bb523ec23cf69e6a01f89bb57ff
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
@@ -0,0 +1,129 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+)
+
+// ErrPartialResource is returned by a detector when complete source
+// information for a Resource is unavailable or the source information
+// contains invalid values that are omitted from the returned Resource.
+var ErrPartialResource = errors.New("partial resource")
+
+// Detector detects OpenTelemetry resource information.
+type Detector interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Detect returns an initialized Resource based on gathered information.
+	// If the source information to construct a Resource contains invalid
+	// values, a Resource is returned with the valid parts of the source
+	// information used for initialization along with an appropriately
+	// wrapped ErrPartialResource error.
+	Detect(ctx context.Context) (*Resource, error)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// Detect returns a new [Resource] merged from all the Resources each of the
+// detectors produces. Each of the detectors is called sequentially, in the
+// order they are passed, merging the produced resource into the previous.
+//
+// This may return a partial Resource along with an error containing
+// [ErrPartialResource] if that error is returned from a detector. It may also
+// return a merge-conflicting Resource along with an error containing
+// [ErrSchemaURLConflict] if merging Resources from different detectors results
+// in a schema URL conflict. It is up to the caller to determine if this
+// returned Resource should be used or not.
+//
+// If one of the detectors returns an error that is not [ErrPartialResource],
+// the resource produced by the detector will not be merged and the returned
+// error will wrap that detector's error.
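+//
+// For example (illustrative sketch; the string-producing function is
+// hypothetical):
+//
+//	res, err := Detect(ctx, StringDetector(
+//		semconv.SchemaURL, semconv.ServiceNameKey,
+//		func() (string, error) { return "my-service", nil },
+//	))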
+func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) {
+	r := new(Resource)
+	return r, detect(ctx, r, detectors)
+}
+
+// detect runs all detectors using ctx and merges the result into res. This
+// assumes res is allocated and not nil; it will panic otherwise.
+//
+// If the detectors or merging resources produces any errors (e.g.
+// [ErrPartialResource], [ErrSchemaURLConflict]), a single error wrapping all
+// of these errors will be returned. Otherwise, nil is returned.
+func detect(ctx context.Context, res *Resource, detectors []Detector) error {
+	var (
+		r    *Resource
+		errs detectErrs
+		err  error
+	)
+
+	for _, detector := range detectors {
+		if detector == nil {
+			continue
+		}
+		r, err = detector.Detect(ctx)
+		if err != nil {
+			errs = append(errs, err)
+			if !errors.Is(err, ErrPartialResource) {
+				continue
+			}
+		}
+		r, err = Merge(res, r)
+		if err != nil {
+			errs = append(errs, err)
+		}
+		*res = *r
+	}
+
+	if len(errs) == 0 {
+		return nil
+	}
+	if errors.Is(errs, ErrSchemaURLConflict) {
+		// If there has been a merge conflict, ensure the resource has no
+		// schema URL.
+		res.schemaURL = ""
+	}
+	return errs
+}
+
+type detectErrs []error
+
+func (e detectErrs) Error() string {
+	errStr := make([]string, len(e))
+	for i, err := range e {
+		errStr[i] = fmt.Sprintf("* %s", err)
+	}
+
+	format := "%d errors occurred detecting resource:\n\t%s"
+	return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t"))
+}
+
+func (e detectErrs) Unwrap() error {
+	switch len(e) {
+	case 0:
+		return nil
+	case 1:
+		return e[0]
+	}
+	return e[1:]
+}
+
+func (e detectErrs) Is(target error) bool {
+	return len(e) != 0 && errors.Is(e[0], target)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
new file mode 100644
index 0000000000000000000000000000000000000000..6a2c08293a86ee753bbd1b6c26ff92ba9397f8ed
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
@@ -0,0 +1,108 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk"
+	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+)
+
+type (
+	// telemetrySDK is a Detector that provides information about
+	// the OpenTelemetry SDK used.  This Detector is included as a
+	// builtin. If these resource attributes are not wanted, use
+	// the WithTelemetrySDK(nil) or WithoutBuiltin() options to
+	// explicitly disable them.
+	telemetrySDK struct{}
+
+	// host is a Detector that provides information about the host
+	// being run on. This Detector is included as a builtin. If
+	// these resource attributes are not wanted, use the
+	// WithHost(nil) or WithoutBuiltin() options to explicitly
+	// disable them.
+	host struct{}
+
+	stringDetector struct {
+		schemaURL string
+		K         attribute.Key
+		F         func() (string, error)
+	}
+
+	defaultServiceNameDetector struct{}
+)
+
+var (
+	_ Detector = telemetrySDK{}
+	_ Detector = host{}
+	_ Detector = stringDetector{}
+	_ Detector = defaultServiceNameDetector{}
+)
+
+// Detect returns a *Resource that describes the OpenTelemetry SDK used.
+func (telemetrySDK) Detect(context.Context) (*Resource, error) {
+	return NewWithAttributes(
+		semconv.SchemaURL,
+		semconv.TelemetrySDKName("opentelemetry"),
+		semconv.TelemetrySDKLanguageGo,
+		semconv.TelemetrySDKVersion(sdk.Version()),
+	), nil
+}
+
+// Detect returns a *Resource that describes the host being run on.
+func (host) Detect(ctx context.Context) (*Resource, error) {
+	return StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname).Detect(ctx)
+}
+
+// StringDetector returns a Detector that will produce a *Resource
+// containing the string as a value corresponding to k. The resulting Resource
+// will have the specified schemaURL.
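+//
+// For example (illustrative), the builtin host detector is equivalent to:
+//
+//	StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname)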
+func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) Detector {
+	return stringDetector{schemaURL: schemaURL, K: k, F: f}
+}
+
+// Detect returns a *Resource that describes the string as a value
+// corresponding to attribute.Key as well as the specific schemaURL.
+func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) {
+	value, err := sd.F()
+	if err != nil {
+		return nil, fmt.Errorf("%s: %w", string(sd.K), err)
+	}
+	a := sd.K.String(value)
+	if !a.Valid() {
+		return nil, fmt.Errorf("invalid attribute: %q -> %q", a.Key, a.Value.Emit())
+	}
+	return NewWithAttributes(sd.schemaURL, sd.K.String(value)), nil
+}
+
+// Detect implements Detector.
+func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) {
+	return StringDetector(
+		semconv.SchemaURL,
+		semconv.ServiceNameKey,
+		func() (string, error) {
+			executable, err := os.Executable()
+			if err != nil {
+				return "unknown_service:go", nil
+			}
+			return "unknown_service:" + filepath.Base(executable), nil
+		},
+	).Detect(ctx)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go
new file mode 100644
index 0000000000000000000000000000000000000000..f263919f6ec0e83e07f3aa870553674bc1aa81e6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go
@@ -0,0 +1,206 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+// config contains configuration for Resource creation.
+type config struct {
+	// detectors that will be evaluated.
+	detectors []Detector
+	// SchemaURL to associate with the Resource.
+	schemaURL string
+}
+
+// Option is the interface that applies a configuration option.
+type Option interface {
+	// apply sets the Option value of a config.
+	apply(config) config
+}
+
+// WithAttributes adds attributes to the configured Resource.
+func WithAttributes(attributes ...attribute.KeyValue) Option {
+	return WithDetectors(detectAttributes{attributes})
+}
+
+type detectAttributes struct {
+	attributes []attribute.KeyValue
+}
+
+func (d detectAttributes) Detect(context.Context) (*Resource, error) {
+	return NewSchemaless(d.attributes...), nil
+}
+
+// WithDetectors adds detectors to be evaluated for the configured resource.
+func WithDetectors(detectors ...Detector) Option {
+	return detectorsOption{detectors: detectors}
+}
+
+type detectorsOption struct {
+	detectors []Detector
+}
+
+func (o detectorsOption) apply(cfg config) config {
+	cfg.detectors = append(cfg.detectors, o.detectors...)
+	return cfg
+}
+
+// WithFromEnv adds attributes from environment variables to the configured resource.
+func WithFromEnv() Option {
+	return WithDetectors(fromEnv{})
+}
+
+// WithHost adds attributes from the host to the configured resource.
+func WithHost() Option {
+	return WithDetectors(host{})
+}
+
+// WithHostID adds host ID information to the configured resource.
+func WithHostID() Option {
+	return WithDetectors(hostIDDetector{})
+}
+
+// WithTelemetrySDK adds TelemetrySDK version info to the configured resource.
+func WithTelemetrySDK() Option {
+	return WithDetectors(telemetrySDK{})
+}
+
+// WithSchemaURL sets the schema URL for the configured resource.
+func WithSchemaURL(schemaURL string) Option {
+	return schemaURLOption(schemaURL)
+}
+
+type schemaURLOption string
+
+func (o schemaURLOption) apply(cfg config) config {
+	cfg.schemaURL = string(o)
+	return cfg
+}
+
+// WithOS adds all the OS attributes to the configured Resource.
+// See individual WithOS* functions to configure specific attributes.
+func WithOS() Option {
+	return WithDetectors(
+		osTypeDetector{},
+		osDescriptionDetector{},
+	)
+}
+
+// WithOSType adds an attribute with the operating system type to the configured Resource.
+func WithOSType() Option {
+	return WithDetectors(osTypeDetector{})
+}
+
+// WithOSDescription adds an attribute with the operating system description to the
+// configured Resource. The formatted string is equivalent to the output of the
+// `uname -snrvm` command.
+func WithOSDescription() Option {
+	return WithDetectors(osDescriptionDetector{})
+}
+
+// WithProcess adds all the Process attributes to the configured Resource.
+//
+// Warning! This option will include process command line arguments. If these
+// contain sensitive information it will be included in the exported resource.
+//
+// This option is equivalent to calling WithProcessPID,
+// WithProcessExecutableName, WithProcessExecutablePath,
+// WithProcessCommandArgs, WithProcessOwner, WithProcessRuntimeName,
+// WithProcessRuntimeVersion, and WithProcessRuntimeDescription. See each
+// option function for information about what resource attributes each
+// includes.
+func WithProcess() Option {
+	return WithDetectors(
+		processPIDDetector{},
+		processExecutableNameDetector{},
+		processExecutablePathDetector{},
+		processCommandArgsDetector{},
+		processOwnerDetector{},
+		processRuntimeNameDetector{},
+		processRuntimeVersionDetector{},
+		processRuntimeDescriptionDetector{},
+	)
+}
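+
+// A combined usage sketch (illustrative; New is the resource constructor
+// defined elsewhere in this package):
+//
+//	res, err := New(ctx,
+//		WithFromEnv(),
+//		WithHost(),
+//		WithProcess(),
+//	)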
+
+// WithProcessPID adds an attribute with the process identifier (PID) to the
+// configured Resource.
+func WithProcessPID() Option {
+	return WithDetectors(processPIDDetector{})
+}
+
+// WithProcessExecutableName adds an attribute with the name of the process
+// executable to the configured Resource.
+func WithProcessExecutableName() Option {
+	return WithDetectors(processExecutableNameDetector{})
+}
+
+// WithProcessExecutablePath adds an attribute with the full path to the process
+// executable to the configured Resource.
+func WithProcessExecutablePath() Option {
+	return WithDetectors(processExecutablePathDetector{})
+}
+
+// WithProcessCommandArgs adds an attribute with all the command arguments (including
+// the command/executable itself) as received by the process to the configured
+// Resource.
+//
+// Warning! This option will include process command line arguments. If these
+// contain sensitive information it will be included in the exported resource.
+func WithProcessCommandArgs() Option {
+	return WithDetectors(processCommandArgsDetector{})
+}
+
+// WithProcessOwner adds an attribute with the username of the user that owns the process
+// to the configured Resource.
+func WithProcessOwner() Option {
+	return WithDetectors(processOwnerDetector{})
+}
+
+// WithProcessRuntimeName adds an attribute with the name of the runtime of this
+// process to the configured Resource.
+func WithProcessRuntimeName() Option {
+	return WithDetectors(processRuntimeNameDetector{})
+}
+
+// WithProcessRuntimeVersion adds an attribute with the version of the runtime of
+// this process to the configured Resource.
+func WithProcessRuntimeVersion() Option {
+	return WithDetectors(processRuntimeVersionDetector{})
+}
+
+// WithProcessRuntimeDescription adds an attribute with an additional description
+// about the runtime of the process to the configured Resource.
+func WithProcessRuntimeDescription() Option {
+	return WithDetectors(processRuntimeDescriptionDetector{})
+}
+
+// WithContainer adds all the Container attributes to the configured Resource.
+// See individual WithContainer* functions to configure specific attributes.
+func WithContainer() Option {
+	return WithDetectors(
+		cgroupContainerIDDetector{},
+	)
+}
+
+// WithContainerID adds an attribute with the ID of the container to the configured Resource.
+// Note: WithContainerID will not extract the correct container ID in an ECS environment.
+// Please use the ECS resource detector instead (https://pkg.go.dev/go.opentelemetry.io/contrib/detectors/aws/ecs).
+func WithContainerID() Option {
+	return WithDetectors(cgroupContainerIDDetector{})
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
new file mode 100644
index 0000000000000000000000000000000000000000..c1b47193fe78a86c90e1a7cb7aebe33ab0ea40aa
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
@@ -0,0 +1,100 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"bufio"
+	"context"
+	"errors"
+	"io"
+	"os"
+	"regexp"
+
+	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+)
+
+type containerIDProvider func() (string, error)
+
+var (
+	containerID         containerIDProvider = getContainerIDFromCGroup
+	cgroupContainerIDRe                     = regexp.MustCompile(`^.*/(?:.*[-:])?([0-9a-f]+)(?:\.|\s*$)`)
+)
+
+type cgroupContainerIDDetector struct{}
+
+const cgroupPath = "/proc/self/cgroup"
+
+// Detect returns a *Resource that describes the ID of the container.
+// If no container ID is found, an empty resource will be returned.
+func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) {
+	containerID, err := containerID()
+	if err != nil {
+		return nil, err
+	}
+
+	if containerID == "" {
+		return Empty(), nil
+	}
+	return NewWithAttributes(semconv.SchemaURL, semconv.ContainerID(containerID)), nil
+}
+
+var (
+	defaultOSStat = os.Stat
+	osStat        = defaultOSStat
+
+	defaultOSOpen = func(name string) (io.ReadCloser, error) {
+		return os.Open(name)
+	}
+	osOpen = defaultOSOpen
+)
+
+// getContainerIDFromCGroup returns the ID of the container from the cgroup file.
+// If no container ID is found, an empty string will be returned.
+func getContainerIDFromCGroup() (string, error) {
+	if _, err := osStat(cgroupPath); errors.Is(err, os.ErrNotExist) {
+		// File does not exist, skip
+		return "", nil
+	}
+
+	file, err := osOpen(cgroupPath)
+	if err != nil {
+		return "", err
+	}
+	defer file.Close()
+
+	return getContainerIDFromReader(file), nil
+}
+
+// getContainerIDFromReader returns the ID of the container from reader.
+func getContainerIDFromReader(reader io.Reader) string {
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		line := scanner.Text()
+
+		if id := getContainerIDFromLine(line); id != "" {
+			return id
+		}
+	}
+	return ""
+}
+
+// getContainerIDFromLine returns the ID of the container from one string line.
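+// For example (illustrative), the cgroup line "0::/docker/0123456789abcdef"
+// yields "0123456789abcdef".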
+func getContainerIDFromLine(line string) string {
+	matches := cgroupContainerIDRe.FindStringSubmatch(line)
+	if len(matches) <= 1 {
+		return ""
+	}
+	return matches[1]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..d55a50b0dc2cb426b20ef01548f2322e31e6ca7d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package resource provides detecting and representing resources.
+//
+// The fundamental struct is a Resource which holds identifying information
+// about the entities for which telemetry is exported.
+//
+// To automatically construct Resources from an environment, a Detector
+// interface is defined. Implementations of this interface can be passed to
+// the Detect function to generate a Resource from the merged information.
+//
+// To load a user defined Resource from the environment variable
+// OTEL_RESOURCE_ATTRIBUTES the FromEnv Detector can be used. It will interpret
+// the value as a list of comma delimited key/value pairs
+// (e.g. `<key1>=<value1>,<key2>=<value2>,...`).
+//
+// While this package provides a stable API,
+// the attributes added by resource detectors may change.
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
new file mode 100644
index 0000000000000000000000000000000000000000..be4cbe423ea512a8f19756849bbd4b6842e7eb58
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
@@ -0,0 +1,106 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"os"
+	"strings"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+)
+
+const (
+	// resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from.
+	resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" //nolint:gosec // False positive G101: Potential hardcoded credentials
+
+	// svcNameKey is the environment variable name that Service Name information will be read from.
+	svcNameKey = "OTEL_SERVICE_NAME"
+)
+
+// errMissingValue is returned when a resource value is missing.
+var errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource)
+
+// fromEnv is a Detector that collects resource information from the
+// environment.  This Detector is included as a builtin.
+type fromEnv struct{}
+
+// compile-time assertion that fromEnv implements the Detector interface.
+var _ Detector = fromEnv{}
+
+// Detect collects resources from the environment.
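+//
+// For example (illustrative), with OTEL_SERVICE_NAME="checkout" and
+// OTEL_RESOURCE_ATTRIBUTES="team=core,region=eu-west-1", the returned
+// Resource carries service.name=checkout plus the two listed attributes,
+// the service name from OTEL_SERVICE_NAME taking precedence.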
+func (fromEnv) Detect(context.Context) (*Resource, error) {
+	attrs := strings.TrimSpace(os.Getenv(resourceAttrKey))
+	svcName := strings.TrimSpace(os.Getenv(svcNameKey))
+
+	if attrs == "" && svcName == "" {
+		return Empty(), nil
+	}
+
+	var res *Resource
+
+	if svcName != "" {
+		res = NewSchemaless(semconv.ServiceName(svcName))
+	}
+
+	r2, err := constructOTResources(attrs)
+
+	// Ensure that the resource with the service name from OTEL_SERVICE_NAME
+	// takes precedence, if it was defined.
+	res, err2 := Merge(r2, res)
+
+	if err == nil {
+		err = err2
+	} else if err2 != nil {
+		err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()})
+	}
+
+	return res, err
+}
+
+func constructOTResources(s string) (*Resource, error) {
+	if s == "" {
+		return Empty(), nil
+	}
+	pairs := strings.Split(s, ",")
+	var attrs []attribute.KeyValue
+	var invalid []string
+	for _, p := range pairs {
+		k, v, found := strings.Cut(p, "=")
+		if !found {
+			invalid = append(invalid, p)
+			continue
+		}
+		key := strings.TrimSpace(k)
+		val, err := url.PathUnescape(strings.TrimSpace(v))
+		if err != nil {
+			// Retain original value if decoding fails, otherwise it will be
+			// an empty string.
+			val = v
+			otel.Handle(err)
+		}
+		attrs = append(attrs, attribute.String(key, val))
+	}
+	var err error
+	if len(invalid) > 0 {
+		err = fmt.Errorf("%w: %v", errMissingValue, invalid)
+	}
+	return NewSchemaless(attrs...), err
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
new file mode 100644
index 0000000000000000000000000000000000000000..f579329c2c6747a9b40d2bd4dd7eac1a116ba832
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
@@ -0,0 +1,120 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"context"
+	"errors"
+	"strings"
+
+	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+)
+
+type hostIDProvider func() (string, error)
+
+var defaultHostIDProvider hostIDProvider = platformHostIDReader.read
+
+var hostID = defaultHostIDProvider
+
+type hostIDReader interface {
+	read() (string, error)
+}
+
+type fileReader func(string) (string, error)
+
+type commandExecutor func(string, ...string) (string, error)
+
+// hostIDReaderBSD implements hostIDReader.
+type hostIDReaderBSD struct {
+	execCommand commandExecutor
+	readFile    fileReader
+}
+
+// read attempts to read the machine-id from /etc/hostid. If not found, it
+// will execute `kenv -q smbios.system.uuid`. If neither location yields an
+// ID, an error will be returned.
+func (r *hostIDReaderBSD) read() (string, error) {
+	if result, err := r.readFile("/etc/hostid"); err == nil {
+		return strings.TrimSpace(result), nil
+	}
+
+	if result, err := r.execCommand("kenv", "-q", "smbios.system.uuid"); err == nil {
+		return strings.TrimSpace(result), nil
+	}
+
+	return "", errors.New("host id not found in: /etc/hostid or kenv")
+}
+
+// hostIDReaderDarwin implements hostIDReader.
+type hostIDReaderDarwin struct {
+	execCommand commandExecutor
+}
+
+// read executes `ioreg -rd1 -c "IOPlatformExpertDevice"` and parses the host
+// ID from the IOPlatformUUID line. If the command fails or the UUID cannot be
+// parsed, an error will be returned.
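+//
+// An IOPlatformUUID line has the form (illustrative):
+//
+//	"IOPlatformUUID" = "A1B2C3D4-E5F6-4890-ABCD-EF1234567890"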
+func (r *hostIDReaderDarwin) read() (string, error) {
+	result, err := r.execCommand("ioreg", "-rd1", "-c", "IOPlatformExpertDevice")
+	if err != nil {
+		return "", err
+	}
+
+	lines := strings.Split(result, "\n")
+	for _, line := range lines {
+		if strings.Contains(line, "IOPlatformUUID") {
+			parts := strings.Split(line, " = ")
+			if len(parts) == 2 {
+				return strings.Trim(parts[1], "\""), nil
+			}
+			break
+		}
+	}
+
+	return "", errors.New("could not parse IOPlatformUUID")
+}
+
+type hostIDReaderLinux struct {
+	readFile fileReader
+}
+
+// read attempts to read the machine-id from /etc/machine-id followed by
+// /var/lib/dbus/machine-id. If neither location yields an ID, an error will
+// be returned.
+func (r *hostIDReaderLinux) read() (string, error) {
+	if result, err := r.readFile("/etc/machine-id"); err == nil {
+		return strings.TrimSpace(result), nil
+	}
+
+	if result, err := r.readFile("/var/lib/dbus/machine-id"); err == nil {
+		return strings.TrimSpace(result), nil
+	}
+
+	return "", errors.New("host id not found in: /etc/machine-id or /var/lib/dbus/machine-id")
+}
+
+type hostIDDetector struct{}
+
+// Detect returns a *Resource containing the platform specific host id.
+func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) {
+	hostID, err := hostID()
+	if err != nil {
+		return nil, err
+	}
+
+	return NewWithAttributes(
+		semconv.SchemaURL,
+		semconv.HostID(hostID),
+	), nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go
new file mode 100644
index 0000000000000000000000000000000000000000..1778bbacf05971fa9e61123ad80faeaf4cfc1571
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go
@@ -0,0 +1,23 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build dragonfly || freebsd || netbsd || openbsd || solaris
+// +build dragonfly freebsd netbsd openbsd solaris
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+var platformHostIDReader hostIDReader = &hostIDReaderBSD{
+	execCommand: execCommand,
+	readFile:    readFile,
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go
new file mode 100644
index 0000000000000000000000000000000000000000..ba41409b23caa3438688121de4fc7bc82151e698
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go
@@ -0,0 +1,19 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+var platformHostIDReader hostIDReader = &hostIDReaderDarwin{
+	execCommand: execCommand,
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go
new file mode 100644
index 0000000000000000000000000000000000000000..207acb0ed3aa8ecc716704249576aaa50013a5cd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin || dragonfly || freebsd || netbsd || openbsd || solaris
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import "os/exec"
+
+func execCommand(name string, arg ...string) (string, error) {
+	cmd := exec.Command(name, arg...)
+	b, err := cmd.Output()
+	if err != nil {
+		return "", err
+	}
+
+	return string(b), nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..410579b8fc97893d43bf1c3cb64156d1f46313c2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go
@@ -0,0 +1,22 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+// +build linux
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+var platformHostIDReader hostIDReader = &hostIDReaderLinux{
+	readFile: readFile,
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go
new file mode 100644
index 0000000000000000000000000000000000000000..721e3ca6e7d33ee8c6ed9bd32e7ac593b6c8fee7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux || dragonfly || freebsd || netbsd || openbsd || solaris
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import "os"
+
+func readFile(filename string) (string, error) {
+	b, err := os.ReadFile(filename)
+	if err != nil {
+		return "", err
+	}
+
+	return string(b), nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go
new file mode 100644
index 0000000000000000000000000000000000000000..89df9d6882e50b17f62159450e8235bfd6cff1fa
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go
@@ -0,0 +1,36 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !darwin
+// +build !dragonfly
+// +build !freebsd
+// +build !linux
+// +build !netbsd
+// +build !openbsd
+// +build !solaris
+// +build !windows
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+// hostIDReaderUnsupported is a placeholder implementation for operating systems
+// for which this project currently doesn't support host.id
+// attribute detection. See the build tags declared at the top of this file
+// for a list of unsupported OSes.
+type hostIDReaderUnsupported struct{}
+
+func (*hostIDReaderUnsupported) read() (string, error) {
+	return "<unknown>", nil
+}
+
+var platformHostIDReader hostIDReader = &hostIDReaderUnsupported{}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..5b431c6ee6e3a43cec9558b2e1208552036dba76
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go
@@ -0,0 +1,48 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+// +build windows
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"golang.org/x/sys/windows/registry"
+)
+
+// hostIDReaderWindows implements hostIDReader.
+type hostIDReaderWindows struct{}
+
+// read reads MachineGuid from the Windows registry key:
+// SOFTWARE\Microsoft\Cryptography.
+func (*hostIDReaderWindows) read() (string, error) {
+	k, err := registry.OpenKey(
+		registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`,
+		registry.QUERY_VALUE|registry.WOW64_64KEY,
+	)
+
+	if err != nil {
+		return "", err
+	}
+	defer k.Close()
+
+	guid, _, err := k.GetStringValue("MachineGuid")
+	if err != nil {
+		return "", err
+	}
+
+	return guid, nil
+}
+
+var platformHostIDReader hostIDReader = &hostIDReaderWindows{}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
new file mode 100644
index 0000000000000000000000000000000000000000..8fbf071c17851e18894e99b2133d28f40d1bc5bc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
@@ -0,0 +1,100 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"context"
+	"strings"
+
+	"go.opentelemetry.io/otel/attribute"
+	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+)
+
+type osDescriptionProvider func() (string, error)
+
+var defaultOSDescriptionProvider osDescriptionProvider = platformOSDescription
+
+var osDescription = defaultOSDescriptionProvider
+
+func setDefaultOSDescriptionProvider() {
+	setOSDescriptionProvider(defaultOSDescriptionProvider)
+}
+
+func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) {
+	osDescription = osDescriptionProvider
+}
+
+type (
+	osTypeDetector        struct{}
+	osDescriptionDetector struct{}
+)
+
+// Detect returns a *Resource that describes the operating system type the
+// service is running on.
+func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) {
+	osType := runtimeOS()
+
+	osTypeAttribute := mapRuntimeOSToSemconvOSType(osType)
+
+	return NewWithAttributes(
+		semconv.SchemaURL,
+		osTypeAttribute,
+	), nil
+}
+
+// Detect returns a *Resource that describes the operating system the
+// service is running on.
+func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
+	description, err := osDescription()
+	if err != nil {
+		return nil, err
+	}
+
+	return NewWithAttributes(
+		semconv.SchemaURL,
+		semconv.OSDescription(description),
+	), nil
+}
+
+// mapRuntimeOSToSemconvOSType translates the OS name as provided by the Go runtime
+// into an OS type attribute with the corresponding value defined by the semantic
+// conventions. In case the provided OS name isn't mapped, it's transformed to lowercase
+// and used as the value for the returned OS type attribute.
+func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue {
+	// the elements in this map are the intersection between
+	// available GOOS values and defined semconv OS types
+	osTypeAttributeMap := map[string]attribute.KeyValue{
+		"aix":       semconv.OSTypeAIX,
+		"darwin":    semconv.OSTypeDarwin,
+		"dragonfly": semconv.OSTypeDragonflyBSD,
+		"freebsd":   semconv.OSTypeFreeBSD,
+		"linux":     semconv.OSTypeLinux,
+		"netbsd":    semconv.OSTypeNetBSD,
+		"openbsd":   semconv.OSTypeOpenBSD,
+		"solaris":   semconv.OSTypeSolaris,
+		"windows":   semconv.OSTypeWindows,
+		"zos":       semconv.OSTypeZOS,
+	}
+
+	var osTypeAttribute attribute.KeyValue
+
+	if attr, ok := osTypeAttributeMap[osType]; ok {
+		osTypeAttribute = attr
+	} else {
+		osTypeAttribute = semconv.OSTypeKey.String(strings.ToLower(osType))
+	}
+
+	return osTypeAttribute
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
new file mode 100644
index 0000000000000000000000000000000000000000..24ec85793de650c42eb8311942aa510db26168ae
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
@@ -0,0 +1,102 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+	"os"
+)
+
+type plist struct {
+	XMLName xml.Name `xml:"plist"`
+	Dict    dict     `xml:"dict"`
+}
+
+type dict struct {
+	Key    []string `xml:"key"`
+	String []string `xml:"string"`
+}
+
+// osRelease builds a string describing the operating system release based on the
+// contents of the property list (.plist) system files. If no .plist files are found,
+// or if the required properties to build the release description string are missing,
+// an empty string is returned instead. The generated string resembles the output of
+// the `sw_vers` command-line program, but in a single-line string. For more information
+// about the `sw_vers` program, see: https://www.unix.com/man-page/osx/1/SW_VERS.
+func osRelease() string {
+	file, err := getPlistFile()
+	if err != nil {
+		return ""
+	}
+
+	defer file.Close()
+
+	values, err := parsePlistFile(file)
+	if err != nil {
+		return ""
+	}
+
+	return buildOSRelease(values)
+}
+
+// getPlistFile returns a *os.File pointing to one of the well-known .plist files
+// available on macOS. If no file can be opened, it returns an error.
+func getPlistFile() (*os.File, error) {
+	return getFirstAvailableFile([]string{
+		"/System/Library/CoreServices/SystemVersion.plist",
+		"/System/Library/CoreServices/ServerVersion.plist",
+	})
+}
+
+// parsePlistFile processes the file pointed to by `file` as a .plist file and
+// returns a map with the key-values for each pair of correlated <key> and
+// <string> elements contained in it.
+func parsePlistFile(file io.Reader) (map[string]string, error) {
+	var v plist
+
+	err := xml.NewDecoder(file).Decode(&v)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(v.Dict.Key) != len(v.Dict.String) {
+		return nil, fmt.Errorf("the number of <key> and <string> elements doesn't match")
+	}
+
+	properties := make(map[string]string, len(v.Dict.Key))
+	for i, key := range v.Dict.Key {
+		properties[key] = v.Dict.String[i]
+	}
+
+	return properties, nil
+}
+
+// buildOSRelease builds a string describing the OS release based on the properties
+// available on the provided map. It tries to find the `ProductName`, `ProductVersion`
+// and `ProductBuildVersion` properties. If any of these properties is missing,
+// it returns an empty string.
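+//
+// For example (illustrative), a map with ProductName "macOS", ProductVersion
+// "14.3", and ProductBuildVersion "23D56" yields "macOS 14.3 (23D56)".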
+func buildOSRelease(properties map[string]string) string {
+	productName := properties["ProductName"]
+	productVersion := properties["ProductVersion"]
+	productBuildVersion := properties["ProductBuildVersion"]
+
+	if productName == "" || productVersion == "" || productBuildVersion == "" {
+		return ""
+	}
+
+	return fmt.Sprintf("%s %s (%s)", productName, productVersion, productBuildVersion)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..c771942deecec4bf6a58ed534ce9c46a04d27b4f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
@@ -0,0 +1,154 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix dragonfly freebsd linux netbsd openbsd solaris zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+)
+
+// osRelease builds a string describing the operating system release based on the
+// properties of the os-release file. If no os-release file is found, or if the
+// required properties to build the release description string are missing, an empty
+// string is returned instead. For more information about os-release files, see:
+// https://www.freedesktop.org/software/systemd/man/os-release.html
+func osRelease() string {
+	file, err := getOSReleaseFile()
+	if err != nil {
+		return ""
+	}
+
+	defer file.Close()
+
+	values := parseOSReleaseFile(file)
+
+	return buildOSRelease(values)
+}
+
+// getOSReleaseFile returns a *os.File pointing to one of the well-known os-release
+// files, according to their order of preference. If no file can be opened, it
+// returns an error.
+func getOSReleaseFile() (*os.File, error) {
+	return getFirstAvailableFile([]string{"/etc/os-release", "/usr/lib/os-release"})
+}
+
+// parseOSReleaseFile processes the file pointed to by `file` as an os-release
+// file and returns a map with the key-values contained in it. Empty lines,
+// lines starting with a '#' character, and lines missing the key=value
+// separator are ignored. Values are unquoted and unescaped.
+func parseOSReleaseFile(file io.Reader) map[string]string {
+	values := make(map[string]string)
+	scanner := bufio.NewScanner(file)
+
+	for scanner.Scan() {
+		line := scanner.Text()
+
+		if skip(line) {
+			continue
+		}
+
+		key, value, ok := parse(line)
+		if ok {
+			values[key] = value
+		}
+	}
+
+	return values
+}
+
+// skip returns true if the line is blank or starts with a '#' character, and
+// therefore should be skipped from processing.
+func skip(line string) bool {
+	line = strings.TrimSpace(line)
+
+	return len(line) == 0 || strings.HasPrefix(line, "#")
+}
+
+// parse attempts to split the provided line on the first '=' character, and then
+// sanitize each side of the split before returning them as a key-value pair.
+func parse(line string) (string, string, bool) {
+	k, v, found := strings.Cut(line, "=")
+
+	if !found || len(k) == 0 {
+		return "", "", false
+	}
+
+	key := strings.TrimSpace(k)
+	value := unescape(unquote(strings.TrimSpace(v)))
+
+	return key, value, true
+}
+
+// unquote checks whether the string `s` is quoted with double or single quotes
+// and, if so, returns a version of the string without them. Otherwise it returns
+// the provided string unchanged.
+func unquote(s string) string {
+	if len(s) < 2 {
+		return s
+	}
+
+	if (s[0] == '"' || s[0] == '\'') && s[0] == s[len(s)-1] {
+		return s[1 : len(s)-1]
+	}
+
+	return s
+}
+
+// unescape removes the `\` prefix from some characters that are expected
+// to have it added in front of them for escaping purposes.
+func unescape(s string) string {
+	return strings.NewReplacer(
+		`\$`, `$`,
+		`\"`, `"`,
+		`\'`, `'`,
+		`\\`, `\`,
+		"\\`", "`",
+	).Replace(s)
+}
+
+// buildOSRelease builds a string describing the OS release based on the properties
+// available on the provided map. It favors a combination of the `NAME` and `VERSION`
+// properties as first option (falling back to `VERSION_ID` if `VERSION` isn't
+// found), and uses `PRETTY_NAME` alone if either of the former is not present. If
+// none of these properties are found, it returns an empty string.
+//
+// The rationale behind not using `PRETTY_NAME` as first choice was that, for some
+// Linux distributions, it doesn't include the same detail that can be found on the
+// individual `NAME` and `VERSION` properties, and combining `PRETTY_NAME` with
+// other properties can produce "pretty" redundant strings in some cases.
+func buildOSRelease(values map[string]string) string {
+	var osRelease string
+
+	name := values["NAME"]
+	version := values["VERSION"]
+
+	if version == "" {
+		version = values["VERSION_ID"]
+	}
+
+	if name != "" && version != "" {
+		osRelease = fmt.Sprintf("%s %s", name, version)
+	} else {
+		osRelease = values["PRETTY_NAME"]
+	}
+
+	return osRelease
+}
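
A similar in-package sketch of the os-release path, using made-up file content; it shows quotes being stripped by unquote and NAME/VERSION winning over PRETTY_NAME:

//go:build linux

package resource

import (
	"fmt"
	"strings"
	"testing"
)

func TestOSReleaseSketch(t *testing.T) {
	data := `# comments and blank lines are skipped

NAME="Ubuntu"
VERSION="22.04.3 LTS (Jammy Jellyfish)"
PRETTY_NAME="Ubuntu 22.04.3 LTS"
line-without-separator`

	values := parseOSReleaseFile(strings.NewReader(data))
	// NAME and VERSION are both present, so PRETTY_NAME is ignored.
	fmt.Println(buildOSRelease(values)) // Ubuntu 22.04.3 LTS (Jammy Jellyfish)
}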
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..1c84afc1852ef806d81834535d53daac05a7b92e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
@@ -0,0 +1,90 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"fmt"
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+type unameProvider func(buf *unix.Utsname) (err error)
+
+var defaultUnameProvider unameProvider = unix.Uname
+
+var currentUnameProvider = defaultUnameProvider
+
+func setDefaultUnameProvider() {
+	setUnameProvider(defaultUnameProvider)
+}
+
+func setUnameProvider(unameProvider unameProvider) {
+	currentUnameProvider = unameProvider
+}
+
+// platformOSDescription returns a human readable OS version information string.
+// The final string combines OS release information (where available) and the
+// result of the `uname` system call.
+func platformOSDescription() (string, error) {
+	uname, err := uname()
+	if err != nil {
+		return "", err
+	}
+
+	osRelease := osRelease()
+	if osRelease != "" {
+		return fmt.Sprintf("%s (%s)", osRelease, uname), nil
+	}
+
+	return uname, nil
+}
+
+// uname issues a uname(2) system call (or equivalent on systems that don't
+// have one) and formats the output in a single string, similar to the output
+// of the `uname` commandline program. The final string resembles the one
+// obtained with a call to `uname -snrvm`.
+func uname() (string, error) {
+	var utsName unix.Utsname
+
+	err := currentUnameProvider(&utsName)
+	if err != nil {
+		return "", err
+	}
+
+	return fmt.Sprintf("%s %s %s %s %s",
+		unix.ByteSliceToString(utsName.Sysname[:]),
+		unix.ByteSliceToString(utsName.Nodename[:]),
+		unix.ByteSliceToString(utsName.Release[:]),
+		unix.ByteSliceToString(utsName.Version[:]),
+		unix.ByteSliceToString(utsName.Machine[:]),
+	), nil
+}
+
+// getFirstAvailableFile returns an *os.File of the first available
+// file from a list of candidate file paths.
+func getFirstAvailableFile(candidates []string) (*os.File, error) {
+	for _, c := range candidates {
+		file, err := os.Open(c)
+		if err == nil {
+			return file, nil
+		}
+	}
+
+	return nil, fmt.Errorf("no candidate file available: %v", candidates)
+}
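
The uname provider indirection above exists so deterministic data can be substituted, e.g. in tests. A hedged in-package sketch (Linux-only; every uname field value here is fake):

//go:build linux

package resource

import (
	"fmt"
	"testing"

	"golang.org/x/sys/unix"
)

func TestUnameSketch(t *testing.T) {
	setUnameProvider(func(buf *unix.Utsname) error {
		// Fill the fixed-size byte arrays with fake uname data.
		copy(buf.Sysname[:], "Linux")
		copy(buf.Nodename[:], "test-host")
		copy(buf.Release[:], "6.1.0")
		copy(buf.Version[:], "#1 SMP")
		copy(buf.Machine[:], "x86_64")
		return nil
	})
	defer setDefaultUnameProvider()

	desc, err := platformOSDescription()
	if err != nil {
		t.Fatal(err)
	}
	// On a host with an os-release file this prints something like:
	//   Ubuntu 22.04.3 LTS (Jammy Jellyfish) (Linux test-host 6.1.0 #1 SMP x86_64)
	fmt.Println(desc)
}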
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
new file mode 100644
index 0000000000000000000000000000000000000000..3ebcb534f28eb8b7d5e56c536a4f18ff1fa61afc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
@@ -0,0 +1,34 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !aix
+// +build !darwin
+// +build !dragonfly
+// +build !freebsd
+// +build !linux
+// +build !netbsd
+// +build !openbsd
+// +build !solaris
+// +build !windows
+// +build !zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+// platformOSDescription is a placeholder implementation for OSes
+// for which this project currently doesn't support os.description
+// attribute detection. See the build tags declaration at the top of this
+// file for the list of unsupported OSes.
+func platformOSDescription() (string, error) {
+	return "<unknown>", nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..faad64d8daa6bb37a45761f884216ad2c262a1a8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
@@ -0,0 +1,101 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"fmt"
+	"strconv"
+
+	"golang.org/x/sys/windows/registry"
+)
+
+// platformOSDescription returns a human readable OS version information string.
+// It does so by querying registry values under the
+// `SOFTWARE\Microsoft\Windows NT\CurrentVersion` key. The final string
+// resembles the one displayed by the Version Reporter Applet (winver.exe).
+func platformOSDescription() (string, error) {
+	k, err := registry.OpenKey(
+		registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
+
+	if err != nil {
+		return "", err
+	}
+
+	defer k.Close()
+
+	var (
+		productName               = readProductName(k)
+		displayVersion            = readDisplayVersion(k)
+		releaseID                 = readReleaseID(k)
+		currentMajorVersionNumber = readCurrentMajorVersionNumber(k)
+		currentMinorVersionNumber = readCurrentMinorVersionNumber(k)
+		currentBuildNumber        = readCurrentBuildNumber(k)
+		ubr                       = readUBR(k)
+	)
+
+	if displayVersion != "" {
+		displayVersion += " "
+	}
+
+	return fmt.Sprintf("%s %s(%s) [Version %s.%s.%s.%s]",
+		productName,
+		displayVersion,
+		releaseID,
+		currentMajorVersionNumber,
+		currentMinorVersionNumber,
+		currentBuildNumber,
+		ubr,
+	), nil
+}
+
+func getStringValue(name string, k registry.Key) string {
+	value, _, _ := k.GetStringValue(name)
+
+	return value
+}
+
+func getIntegerValue(name string, k registry.Key) uint64 {
+	value, _, _ := k.GetIntegerValue(name)
+
+	return value
+}
+
+func readProductName(k registry.Key) string {
+	return getStringValue("ProductName", k)
+}
+
+func readDisplayVersion(k registry.Key) string {
+	return getStringValue("DisplayVersion", k)
+}
+
+func readReleaseID(k registry.Key) string {
+	return getStringValue("ReleaseID", k)
+}
+
+func readCurrentMajorVersionNumber(k registry.Key) string {
+	return strconv.FormatUint(getIntegerValue("CurrentMajorVersionNumber", k), 10)
+}
+
+func readCurrentMinorVersionNumber(k registry.Key) string {
+	return strconv.FormatUint(getIntegerValue("CurrentMinorVersionNumber", k), 10)
+}
+
+func readCurrentBuildNumber(k registry.Key) string {
+	return getStringValue("CurrentBuildNumber", k)
+}
+
+func readUBR(k registry.Key) string {
+	return strconv.FormatUint(getIntegerValue("UBR", k), 10)
+}
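
On Windows the whole description is assembled from the registry reads above, and each helper silently degrades to an empty string or zero when a value is absent. A small in-package sketch; the printed shape is illustrative, not captured from a real host:

//go:build windows

package resource

import (
	"fmt"
	"testing"
)

func TestWindowsDescriptionSketch(t *testing.T) {
	desc, err := platformOSDescription()
	if err != nil {
		t.Skip("CurrentVersion key not readable:", err)
	}
	// Prints something shaped like:
	//   Windows 10 Pro 22H2 (2009) [Version 10.0.19045.3803]
	fmt.Println(desc)
}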
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
new file mode 100644
index 0000000000000000000000000000000000000000..739ea4512ac1a09dd3b0f14246176674c7081612
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
@@ -0,0 +1,184 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"os/user"
+	"path/filepath"
+	"runtime"
+
+	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+)
+
+type (
+	pidProvider            func() int
+	executablePathProvider func() (string, error)
+	commandArgsProvider    func() []string
+	ownerProvider          func() (*user.User, error)
+	runtimeNameProvider    func() string
+	runtimeVersionProvider func() string
+	runtimeOSProvider      func() string
+	runtimeArchProvider    func() string
+)
+
+var (
+	defaultPidProvider            pidProvider            = os.Getpid
+	defaultExecutablePathProvider executablePathProvider = os.Executable
+	defaultCommandArgsProvider    commandArgsProvider    = func() []string { return os.Args }
+	defaultOwnerProvider          ownerProvider          = user.Current
+	defaultRuntimeNameProvider    runtimeNameProvider    = func() string {
+		if runtime.Compiler == "gc" {
+			return "go"
+		}
+		return runtime.Compiler
+	}
+	defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version
+	defaultRuntimeOSProvider      runtimeOSProvider      = func() string { return runtime.GOOS }
+	defaultRuntimeArchProvider    runtimeArchProvider    = func() string { return runtime.GOARCH }
+)
+
+var (
+	pid            = defaultPidProvider
+	executablePath = defaultExecutablePathProvider
+	commandArgs    = defaultCommandArgsProvider
+	owner          = defaultOwnerProvider
+	runtimeName    = defaultRuntimeNameProvider
+	runtimeVersion = defaultRuntimeVersionProvider
+	runtimeOS      = defaultRuntimeOSProvider
+	runtimeArch    = defaultRuntimeArchProvider
+)
+
+func setDefaultOSProviders() {
+	setOSProviders(
+		defaultPidProvider,
+		defaultExecutablePathProvider,
+		defaultCommandArgsProvider,
+	)
+}
+
+func setOSProviders(
+	pidProvider pidProvider,
+	executablePathProvider executablePathProvider,
+	commandArgsProvider commandArgsProvider,
+) {
+	pid = pidProvider
+	executablePath = executablePathProvider
+	commandArgs = commandArgsProvider
+}
+
+func setDefaultRuntimeProviders() {
+	setRuntimeProviders(
+		defaultRuntimeNameProvider,
+		defaultRuntimeVersionProvider,
+		defaultRuntimeOSProvider,
+		defaultRuntimeArchProvider,
+	)
+}
+
+func setRuntimeProviders(
+	runtimeNameProvider runtimeNameProvider,
+	runtimeVersionProvider runtimeVersionProvider,
+	runtimeOSProvider runtimeOSProvider,
+	runtimeArchProvider runtimeArchProvider,
+) {
+	runtimeName = runtimeNameProvider
+	runtimeVersion = runtimeVersionProvider
+	runtimeOS = runtimeOSProvider
+	runtimeArch = runtimeArchProvider
+}
+
+func setDefaultUserProviders() {
+	setUserProviders(defaultOwnerProvider)
+}
+
+func setUserProviders(ownerProvider ownerProvider) {
+	owner = ownerProvider
+}
+
+type (
+	processPIDDetector                struct{}
+	processExecutableNameDetector     struct{}
+	processExecutablePathDetector     struct{}
+	processCommandArgsDetector        struct{}
+	processOwnerDetector              struct{}
+	processRuntimeNameDetector        struct{}
+	processRuntimeVersionDetector     struct{}
+	processRuntimeDescriptionDetector struct{}
+)
+
+// Detect returns a *Resource that describes the process identifier (PID) of the
+// executing process.
+func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) {
+	return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil
+}
+
+// Detect returns a *Resource that describes the name of the process executable.
+func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) {
+	executableName := filepath.Base(commandArgs()[0])
+
+	return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil
+}
+
+// Detect returns a *Resource that describes the full path of the process executable.
+func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) {
+	executablePath, err := executablePath()
+	if err != nil {
+		return nil, err
+	}
+
+	return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePath(executablePath)), nil
+}
+
+// Detect returns a *Resource that describes all the command arguments as received
+// by the process.
+func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) {
+	return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil
+}
+
+// Detect returns a *Resource that describes the username of the user that owns the
+// process.
+func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) {
+	owner, err := owner()
+	if err != nil {
+		return nil, err
+	}
+
+	return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwner(owner.Username)), nil
+}
+
+// Detect returns a *Resource that describes the name of the compiler used to compile
+// this process image.
+func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) {
+	return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil
+}
+
+// Detect returns a *Resource that describes the version of the runtime of this process.
+func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) {
+	return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil
+}
+
+// Detect returns a *Resource that describes the runtime of this process.
+func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
+	runtimeDescription := fmt.Sprintf(
+		"go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch())
+
+	return NewWithAttributes(
+		semconv.SchemaURL,
+		semconv.ProcessRuntimeDescription(runtimeDescription),
+	), nil
+}
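
The provider variables above are the seams that let each detector be exercised without touching real process state. A minimal in-package sketch with stubbed OS providers (all stub values are fake):

package resource

import (
	"context"
	"fmt"
	"testing"
)

func TestProcessDetectorSketch(t *testing.T) {
	setOSProviders(
		func() int { return 1234 },                              // pid
		func() (string, error) { return "/usr/bin/app", nil },   // executable path
		func() []string { return []string{"app", "--verbose"} }, // command args
	)
	defer setDefaultOSProviders()

	res, err := processPIDDetector{}.Detect(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	fmt.Println(res) // process.pid=1234
}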
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
new file mode 100644
index 0000000000000000000000000000000000000000..cb1ee0a9ceb30c2cc3a0ac0b963384bc0d80f29a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
@@ -0,0 +1,298 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+)
+
+// Resource describes an entity about which identifying information
+// and metadata is exposed.  Resource is an immutable object,
+// equivalent to a map from key to unique value.
+//
+// Resources should be passed and stored as pointers
+// (`*resource.Resource`).  The `nil` value is equivalent to an empty
+// Resource.
+type Resource struct {
+	attrs     attribute.Set
+	schemaURL string
+}
+
+var (
+	defaultResource     *Resource
+	defaultResourceOnce sync.Once
+)
+
+// ErrSchemaURLConflict is an error returned when two Resources are merged
+// together that contain different, non-empty, schema URLs.
+var ErrSchemaURLConflict = errors.New("conflicting Schema URL")
+
+// New returns a [Resource] built using opts.
+//
+// This may return a partial Resource along with an error containing
+// [ErrPartialResource] if options that provide a [Detector] are used and that
+// error is returned from one or more of the Detectors. It may also return a
+// merge-conflict Resource along with an error containing
+// [ErrSchemaURLConflict] if merging Resources from the opts results in a
+// schema URL conflict (see [Resource.Merge] for more information). It is up to
+// the caller to determine if this returned Resource should be used or not
+// based on these errors.
+func New(ctx context.Context, opts ...Option) (*Resource, error) {
+	cfg := config{}
+	for _, opt := range opts {
+		cfg = opt.apply(cfg)
+	}
+
+	r := &Resource{schemaURL: cfg.schemaURL}
+	return r, detect(ctx, r, cfg.detectors)
+}
+
+// NewWithAttributes creates a resource from attrs and associates the resource with a
+// schema URL. If attrs contains duplicate keys, the last value will be used. If attrs
+// contains any invalid items those items will be dropped. The attrs are assumed to be
+// in a schema identified by schemaURL.
+func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource {
+	resource := NewSchemaless(attrs...)
+	resource.schemaURL = schemaURL
+	return resource
+}
+
+// NewSchemaless creates a resource from attrs. If attrs contains duplicate keys,
+// the last value will be used. If attrs contains any invalid items those items will
+// be dropped. The resource will not be associated with a schema URL. If the schema
+// of the attrs is known use NewWithAttributes instead.
+func NewSchemaless(attrs ...attribute.KeyValue) *Resource {
+	if len(attrs) == 0 {
+		return &Resource{}
+	}
+
+	// Ensure attributes comply with the specification:
+	// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/common/README.md#attribute
+	s, _ := attribute.NewSetWithFiltered(attrs, func(kv attribute.KeyValue) bool {
+		return kv.Valid()
+	})
+
+	// If attrs only contains invalid entries do not allocate a new resource.
+	if s.Len() == 0 {
+		return &Resource{}
+	}
+
+	return &Resource{attrs: s} //nolint
+}
+
+// String implements the Stringer interface and provides a
+// human-readable form of the resource.
+//
+// Avoid using this representation as the key in a map of resources,
+// use Equivalent() as the key instead.
+func (r *Resource) String() string {
+	if r == nil {
+		return ""
+	}
+	return r.attrs.Encoded(attribute.DefaultEncoder())
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Resource.
+func (r *Resource) MarshalLog() interface{} {
+	return struct {
+		Attributes attribute.Set
+		SchemaURL  string
+	}{
+		Attributes: r.attrs,
+		SchemaURL:  r.schemaURL,
+	}
+}
+
+// Attributes returns a copy of attributes from the resource in a sorted order.
+// To avoid allocating a new slice, use an iterator.
+func (r *Resource) Attributes() []attribute.KeyValue {
+	if r == nil {
+		r = Empty()
+	}
+	return r.attrs.ToSlice()
+}
+
+// SchemaURL returns the schema URL associated with Resource r.
+func (r *Resource) SchemaURL() string {
+	if r == nil {
+		return ""
+	}
+	return r.schemaURL
+}
+
+// Iter returns an iterator of the Resource attributes.
+// This is ideal to use if you do not want a copy of the attributes.
+func (r *Resource) Iter() attribute.Iterator {
+	if r == nil {
+		r = Empty()
+	}
+	return r.attrs.Iter()
+}
+
+// Equal returns true when a Resource is equivalent to this Resource.
+func (r *Resource) Equal(eq *Resource) bool {
+	if r == nil {
+		r = Empty()
+	}
+	if eq == nil {
+		eq = Empty()
+	}
+	return r.Equivalent() == eq.Equivalent()
+}
+
+// Merge creates a new [Resource] by merging a and b.
+//
+// If there are common keys between a and b, then the value from b will
+// overwrite the value from a, even if b's value is empty.
+//
+// The SchemaURL of the resources will be merged according to the
+// [OpenTelemetry specification rules]:
+//
+//   - If a's schema URL is empty then the returned Resource's schema URL will
+//     be set to the schema URL of b,
+//   - Else if b's schema URL is empty then the returned Resource's schema URL
+//     will be set to the schema URL of a,
+//   - Else if the schema URLs of a and b are the same then that will be the
+//     schema URL of the returned Resource,
+//   - Else this is a merging error. If the resources have different,
+//     non-empty, schema URLs an error containing [ErrSchemaURLConflict] will
+//     be returned with the merged Resource. The merged Resource will have an
+//     empty schema URL. It may be the case that some unintended attributes
+//     have been overwritten or old semantic conventions persisted in the
+//     returned Resource. It is up to the caller to determine if this returned
+//     Resource should be used or not.
+//
+// [OpenTelemetry specification rules]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#merge
+func Merge(a, b *Resource) (*Resource, error) {
+	if a == nil && b == nil {
+		return Empty(), nil
+	}
+	if a == nil {
+		return b, nil
+	}
+	if b == nil {
+		return a, nil
+	}
+
+	// Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key()
+	// Meaning this is equivalent to: append(a.Attributes(), b.Attributes()...)
+	mi := attribute.NewMergeIterator(b.Set(), a.Set())
+	combine := make([]attribute.KeyValue, 0, a.Len()+b.Len())
+	for mi.Next() {
+		combine = append(combine, mi.Attribute())
+	}
+
+	switch {
+	case a.schemaURL == "":
+		return NewWithAttributes(b.schemaURL, combine...), nil
+	case b.schemaURL == "":
+		return NewWithAttributes(a.schemaURL, combine...), nil
+	case a.schemaURL == b.schemaURL:
+		return NewWithAttributes(a.schemaURL, combine...), nil
+	}
+	// Return the merged resource with an appropriate error. It is up to
+	// the user to decide if the returned resource can be used or not.
+	return NewSchemaless(combine...), fmt.Errorf(
+		"%w: %s and %s",
+		ErrSchemaURLConflict,
+		a.schemaURL,
+		b.schemaURL,
+	)
+}
+
+// Empty returns an instance of Resource with no attributes. It is
+// equivalent to a `nil` Resource.
+func Empty() *Resource {
+	return &Resource{}
+}
+
+// Default returns an instance of Resource with a default
+// "service.name" and OpenTelemetrySDK attributes.
+func Default() *Resource {
+	defaultResourceOnce.Do(func() {
+		var err error
+		defaultResource, err = Detect(
+			context.Background(),
+			defaultServiceNameDetector{},
+			fromEnv{},
+			telemetrySDK{},
+		)
+		if err != nil {
+			otel.Handle(err)
+		}
+		// If Detect did not return a valid resource, fall back to an empty Resource.
+		if defaultResource == nil {
+			defaultResource = &Resource{}
+		}
+	})
+	return defaultResource
+}
+
+// Environment returns an instance of Resource with attributes
+// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable.
+func Environment() *Resource {
+	detector := &fromEnv{}
+	resource, err := detector.Detect(context.Background())
+	if err != nil {
+		otel.Handle(err)
+	}
+	return resource
+}
+
+// Equivalent returns an object that can be compared for equality
+// between two resources. This value is suitable for use as a key in
+// a map.
+func (r *Resource) Equivalent() attribute.Distinct {
+	return r.Set().Equivalent()
+}
+
+// Set returns the equivalent *attribute.Set of this resource's attributes.
+func (r *Resource) Set() *attribute.Set {
+	if r == nil {
+		r = Empty()
+	}
+	return &r.attrs
+}
+
+// MarshalJSON encodes the resource attributes as a JSON list of { "Key":
+// "...", "Value": ... } pairs in order sorted by key.
+func (r *Resource) MarshalJSON() ([]byte, error) {
+	if r == nil {
+		r = Empty()
+	}
+	return r.attrs.MarshalJSON()
+}
+
+// Len returns the number of unique key-values in this Resource.
+func (r *Resource) Len() int {
+	if r == nil {
+		return 0
+	}
+	return r.attrs.Len()
+}
+
+// Encoded returns an encoded representation of the resource.
+func (r *Resource) Encoded(enc attribute.Encoder) string {
+	if r == nil {
+		return ""
+	}
+	return r.attrs.Encoded(enc)
+}
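
Because Merge, NewWithAttributes, and ErrSchemaURLConflict are all exported, the schema URL rules above can be exercised from outside the package. A minimal sketch; the schema URLs and attribute values are made up:

package main

import (
	"errors"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	a := resource.NewWithAttributes("https://example.com/schema/1",
		attribute.String("service.name", "checkout"),
		attribute.String("deployment.environment", "staging"),
	)
	b := resource.NewWithAttributes("https://example.com/schema/1",
		attribute.String("deployment.environment", "production"), // b wins on common keys
	)

	merged, err := resource.Merge(a, b)
	fmt.Println(merged, err) // deployment.environment=production,service.name=checkout <nil>

	// Different, non-empty schema URLs: the merged attributes are still
	// returned, but alongside ErrSchemaURLConflict and with no schema URL.
	c := resource.NewWithAttributes("https://example.com/schema/2")
	_, err = resource.Merge(a, c)
	fmt.Println(errors.Is(err, resource.ErrSchemaURLConflict)) // true
}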
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
new file mode 100644
index 0000000000000000000000000000000000000000..fca26f2e70ef937ac35c77f4d2e666481cc032dc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
@@ -0,0 +1,420 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/internal/env"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Defaults for BatchSpanProcessorOptions.
+const (
+	DefaultMaxQueueSize       = 2048
+	DefaultScheduleDelay      = 5000
+	DefaultExportTimeout      = 30000
+	DefaultMaxExportBatchSize = 512
+)
+
+// BatchSpanProcessorOption configures a BatchSpanProcessor.
+type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions)
+
+// BatchSpanProcessorOptions is configuration settings for a
+// BatchSpanProcessor.
+type BatchSpanProcessorOptions struct {
+	// MaxQueueSize is the maximum queue size to buffer spans for delayed processing. If the
+	// queue gets full it drops the spans. Use BlockOnQueueFull to change this behavior.
+	// The default value of MaxQueueSize is 2048.
+	MaxQueueSize int
+
+	// BatchTimeout is the maximum duration for constructing a batch. Processor
+	// forcefully sends available spans when timeout is reached.
+	// The default value of BatchTimeout is 5000 msec.
+	BatchTimeout time.Duration
+
+	// ExportTimeout specifies the maximum duration for exporting spans. If the timeout
+	// is reached, the export will be cancelled.
+	// The default value of ExportTimeout is 30000 msec.
+	ExportTimeout time.Duration
+
+	// MaxExportBatchSize is the maximum number of spans to process in a single batch.
+	// If there is more than one batch's worth of spans then it processes
+	// multiple batches of spans one batch after the other without any delay.
+	// The default value of MaxExportBatchSize is 512.
+	MaxExportBatchSize int
+
+	// BlockOnQueueFull, if set to true, blocks the OnEnd() and OnStart()
+	// methods when the queue is full.
+	// Blocking option should be used carefully as it can severely affect the performance of an
+	// application.
+	BlockOnQueueFull bool
+}
+
+// batchSpanProcessor is a SpanProcessor that batches asynchronously-received
+// spans and sends them to a trace.Exporter when complete.
+type batchSpanProcessor struct {
+	e SpanExporter
+	o BatchSpanProcessorOptions
+
+	queue   chan ReadOnlySpan
+	dropped uint32
+
+	batch      []ReadOnlySpan
+	batchMutex sync.Mutex
+	timer      *time.Timer
+	stopWait   sync.WaitGroup
+	stopOnce   sync.Once
+	stopCh     chan struct{}
+	stopped    atomic.Bool
+}
+
+var _ SpanProcessor = (*batchSpanProcessor)(nil)
+
+// NewBatchSpanProcessor creates a new SpanProcessor that will send completed
+// span batches to the exporter with the supplied options.
+//
+// If the exporter is nil, the span processor will perform no action.
+func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorOption) SpanProcessor {
+	maxQueueSize := env.BatchSpanProcessorMaxQueueSize(DefaultMaxQueueSize)
+	maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize)
+
+	if maxExportBatchSize > maxQueueSize {
+		if DefaultMaxExportBatchSize > maxQueueSize {
+			maxExportBatchSize = maxQueueSize
+		} else {
+			maxExportBatchSize = DefaultMaxExportBatchSize
+		}
+	}
+
+	o := BatchSpanProcessorOptions{
+		BatchTimeout:       time.Duration(env.BatchSpanProcessorScheduleDelay(DefaultScheduleDelay)) * time.Millisecond,
+		ExportTimeout:      time.Duration(env.BatchSpanProcessorExportTimeout(DefaultExportTimeout)) * time.Millisecond,
+		MaxQueueSize:       maxQueueSize,
+		MaxExportBatchSize: maxExportBatchSize,
+	}
+	for _, opt := range options {
+		opt(&o)
+	}
+	bsp := &batchSpanProcessor{
+		e:      exporter,
+		o:      o,
+		batch:  make([]ReadOnlySpan, 0, o.MaxExportBatchSize),
+		timer:  time.NewTimer(o.BatchTimeout),
+		queue:  make(chan ReadOnlySpan, o.MaxQueueSize),
+		stopCh: make(chan struct{}),
+	}
+
+	bsp.stopWait.Add(1)
+	go func() {
+		defer bsp.stopWait.Done()
+		bsp.processQueue()
+		bsp.drainQueue()
+	}()
+
+	return bsp
+}
+
+// OnStart method does nothing.
+func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {}
+
+// OnEnd method enqueues a ReadOnlySpan for later processing.
+func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) {
+	// Do not enqueue spans after Shutdown.
+	if bsp.stopped.Load() {
+		return
+	}
+
+	// Do not enqueue spans if we are just going to drop them.
+	if bsp.e == nil {
+		return
+	}
+	bsp.enqueue(s)
+}
+
+// Shutdown flushes the queue and waits until all spans are processed.
+// It only executes once. Subsequent calls do nothing.
+func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error {
+	var err error
+	bsp.stopOnce.Do(func() {
+		bsp.stopped.Store(true)
+		wait := make(chan struct{})
+		go func() {
+			close(bsp.stopCh)
+			bsp.stopWait.Wait()
+			if bsp.e != nil {
+				if err := bsp.e.Shutdown(ctx); err != nil {
+					otel.Handle(err)
+				}
+			}
+			close(wait)
+		}()
+		// Wait until the wait group is done or the context is cancelled
+		select {
+		case <-wait:
+		case <-ctx.Done():
+			err = ctx.Err()
+		}
+	})
+	return err
+}
+
+type forceFlushSpan struct {
+	ReadOnlySpan
+	flushed chan struct{}
+}
+
+func (f forceFlushSpan) SpanContext() trace.SpanContext {
+	return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled})
+}
+
+// ForceFlush exports all ended spans that have not yet been exported.
+func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error {
+	// Interrupt if context is already canceled.
+	if err := ctx.Err(); err != nil {
+		return err
+	}
+
+	// Do nothing after Shutdown.
+	if bsp.stopped.Load() {
+		return nil
+	}
+
+	var err error
+	if bsp.e != nil {
+		flushCh := make(chan struct{})
+		if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}) {
+			select {
+			case <-bsp.stopCh:
+				// The batchSpanProcessor is Shutdown.
+				return nil
+			case <-flushCh:
+				// Processed any items in queue prior to ForceFlush being called
+			case <-ctx.Done():
+				return ctx.Err()
+			}
+		}
+
+		wait := make(chan error)
+		go func() {
+			wait <- bsp.exportSpans(ctx)
+			close(wait)
+		}()
+		// Wait until the export is finished or the context is cancelled/timed out
+		select {
+		case err = <-wait:
+		case <-ctx.Done():
+			err = ctx.Err()
+		}
+	}
+	return err
+}
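
Both ForceFlush and Shutdown honor the passed context, so callers typically bound them with a deadline. A minimal sketch using a nil exporter, in which case both calls are cheap no-ops:

package main

import (
	"context"
	"log"
	"time"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	bsp := sdktrace.NewBatchSpanProcessor(nil) // nil exporter: processor no-ops

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	if err := bsp.ForceFlush(ctx); err != nil { // returns ctx.Err() past the deadline
		log.Println("flush:", err)
	}
	if err := bsp.Shutdown(ctx); err != nil { // likewise bounded by ctx
		log.Println("shutdown:", err)
	}
}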
+
+// WithMaxQueueSize returns a BatchSpanProcessorOption that configures the
+// maximum queue size allowed for a BatchSpanProcessor.
+func WithMaxQueueSize(size int) BatchSpanProcessorOption {
+	return func(o *BatchSpanProcessorOptions) {
+		o.MaxQueueSize = size
+	}
+}
+
+// WithMaxExportBatchSize returns a BatchSpanProcessorOption that configures
+// the maximum export batch size allowed for a BatchSpanProcessor.
+func WithMaxExportBatchSize(size int) BatchSpanProcessorOption {
+	return func(o *BatchSpanProcessorOptions) {
+		o.MaxExportBatchSize = size
+	}
+}
+
+// WithBatchTimeout returns a BatchSpanProcessorOption that configures the
+// maximum delay allowed for a BatchSpanProcessor before it will export any
+// held span (whether the queue is full or not).
+func WithBatchTimeout(delay time.Duration) BatchSpanProcessorOption {
+	return func(o *BatchSpanProcessorOptions) {
+		o.BatchTimeout = delay
+	}
+}
+
+// WithExportTimeout returns a BatchSpanProcessorOption that configures the
+// amount of time a BatchSpanProcessor waits for an exporter to export before
+// abandoning the export.
+func WithExportTimeout(timeout time.Duration) BatchSpanProcessorOption {
+	return func(o *BatchSpanProcessorOptions) {
+		o.ExportTimeout = timeout
+	}
+}
+
+// WithBlocking returns a BatchSpanProcessorOption that configures a
+// BatchSpanProcessor to wait for enqueue operations to succeed instead of
+// dropping data when the queue is full.
+func WithBlocking() BatchSpanProcessorOption {
+	return func(o *BatchSpanProcessorOptions) {
+		o.BlockOnQueueFull = true
+	}
+}
+
+// exportSpans is a subroutine of processing and draining the queue.
+func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error {
+	bsp.timer.Reset(bsp.o.BatchTimeout)
+
+	bsp.batchMutex.Lock()
+	defer bsp.batchMutex.Unlock()
+
+	if bsp.o.ExportTimeout > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, bsp.o.ExportTimeout)
+		defer cancel()
+	}
+
+	if l := len(bsp.batch); l > 0 {
+		global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped))
+		err := bsp.e.ExportSpans(ctx, bsp.batch)
+
+		// A new batch is always created after exporting, even if the batch failed to be exported.
+		//
+		// It is up to the exporter to implement any type of retry logic if a batch is failing
+		// to be exported, since it is specific to the protocol and backend being sent to.
+		bsp.batch = bsp.batch[:0]
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// processQueue removes spans from the `queue` channel until processor
+// is shut down. It calls the exporter in batches of up to MaxExportBatchSize
+// waiting up to BatchTimeout to form a batch.
+func (bsp *batchSpanProcessor) processQueue() {
+	defer bsp.timer.Stop()
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	for {
+		select {
+		case <-bsp.stopCh:
+			return
+		case <-bsp.timer.C:
+			if err := bsp.exportSpans(ctx); err != nil {
+				otel.Handle(err)
+			}
+		case sd := <-bsp.queue:
+			if ffs, ok := sd.(forceFlushSpan); ok {
+				close(ffs.flushed)
+				continue
+			}
+			bsp.batchMutex.Lock()
+			bsp.batch = append(bsp.batch, sd)
+			shouldExport := len(bsp.batch) >= bsp.o.MaxExportBatchSize
+			bsp.batchMutex.Unlock()
+			if shouldExport {
+				if !bsp.timer.Stop() {
+					<-bsp.timer.C
+				}
+				if err := bsp.exportSpans(ctx); err != nil {
+					otel.Handle(err)
+				}
+			}
+		}
+	}
+}
+
+// drainQueue awaits any caller that had added to bsp.stopWait
+// to finish enqueuing, then exports the final batch.
+func (bsp *batchSpanProcessor) drainQueue() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	for {
+		select {
+		case sd := <-bsp.queue:
+			if _, ok := sd.(forceFlushSpan); ok {
+				// Ignore flush requests as they are not valid spans.
+				continue
+			}
+
+			bsp.batchMutex.Lock()
+			bsp.batch = append(bsp.batch, sd)
+			shouldExport := len(bsp.batch) == bsp.o.MaxExportBatchSize
+			bsp.batchMutex.Unlock()
+
+			if shouldExport {
+				if err := bsp.exportSpans(ctx); err != nil {
+					otel.Handle(err)
+				}
+			}
+		default:
+			// There are no more enqueued spans. Make final export.
+			if err := bsp.exportSpans(ctx); err != nil {
+				otel.Handle(err)
+			}
+			return
+		}
+	}
+}
+
+func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) {
+	ctx := context.TODO()
+	if bsp.o.BlockOnQueueFull {
+		bsp.enqueueBlockOnQueueFull(ctx, sd)
+	} else {
+		bsp.enqueueDrop(ctx, sd)
+	}
+}
+
+func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan) bool {
+	if !sd.SpanContext().IsSampled() {
+		return false
+	}
+
+	select {
+	case bsp.queue <- sd:
+		return true
+	case <-ctx.Done():
+		return false
+	}
+}
+
+func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool {
+	if !sd.SpanContext().IsSampled() {
+		return false
+	}
+
+	select {
+	case bsp.queue <- sd:
+		return true
+	default:
+		atomic.AddUint32(&bsp.dropped, 1)
+	}
+	return false
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Span Processor.
+func (bsp *batchSpanProcessor) MarshalLog() interface{} {
+	return struct {
+		Type         string
+		SpanExporter SpanExporter
+		Config       BatchSpanProcessorOptions
+	}{
+		Type:         "BatchSpanProcessor",
+		SpanExporter: bsp.e,
+		Config:       bsp.o,
+	}
+}
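
The With* options above are plain functional setters over BatchSpanProcessorOptions. A hedged wiring sketch; the exporter is left nil here (making the processor a no-op), and sdktrace.WithSpanProcessor is a standard SDK option that lives outside this hunk:

package main

import (
	"context"
	"time"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	var exporter sdktrace.SpanExporter // e.g. an OTLP or stdout exporter
	bsp := sdktrace.NewBatchSpanProcessor(exporter,
		sdktrace.WithMaxQueueSize(4096),
		sdktrace.WithMaxExportBatchSize(1024),
		sdktrace.WithBatchTimeout(2*time.Second),
		sdktrace.WithBlocking(), // backpressure instead of dropping spans
	)
	defer bsp.Shutdown(context.Background())

	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(bsp))
	defer tp.Shutdown(context.Background())
}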
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..0285e99be073748585ed205a259ee8e4f463f4dc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
@@ -0,0 +1,21 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package trace contains support for OpenTelemetry distributed tracing.
+
+The following assumes a basic familiarity with OpenTelemetry concepts.
+See https://opentelemetry.io.
+*/
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/event.go b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go
new file mode 100644
index 0000000000000000000000000000000000000000..1e3b426757d98d7c541c9526070886abccd65979
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+// Event is a thing that happened during a Span's lifetime.
+type Event struct {
+	// Name is the name of this event
+	Name string
+
+	// Attributes describe the aspects of the event.
+	Attributes []attribute.KeyValue
+
+	// DroppedAttributeCount is the number of attributes that were not
+	// recorded due to configured limits being reached.
+	DroppedAttributeCount int
+
+	// Time at which this event was recorded.
+	Time time.Time
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go
new file mode 100644
index 0000000000000000000000000000000000000000..d1c86e59b22d5b7de999da93572f75098be69810
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go
@@ -0,0 +1,44 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+// evictedQueue is a FIFO queue with a configurable capacity.
+type evictedQueue struct {
+	queue        []interface{}
+	capacity     int
+	droppedCount int
+}
+
+func newEvictedQueue(capacity int) evictedQueue {
+	// Do not pre-allocate queue, do this lazily.
+	return evictedQueue{capacity: capacity}
+}
+
+// add adds value to the evictedQueue eq. If eq is at capacity, the oldest
+// queued value will be discarded and the drop count incremented.
+func (eq *evictedQueue) add(value interface{}) {
+	if eq.capacity == 0 {
+		eq.droppedCount++
+		return
+	}
+
+	if eq.capacity > 0 && len(eq.queue) == eq.capacity {
+		// Drop first-in while avoiding allocating more capacity to eq.queue.
+		copy(eq.queue[:eq.capacity-1], eq.queue[1:])
+		eq.queue = eq.queue[:eq.capacity-1]
+		eq.droppedCount++
+	}
+	eq.queue = append(eq.queue, value)
+}
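
The add method above is the entire eviction policy: drop-oldest at capacity, counting every drop. An in-package sketch, since evictedQueue is unexported:

package trace

import (
	"fmt"
	"testing"
)

func TestEvictedQueueSketch(t *testing.T) {
	eq := newEvictedQueue(2)
	eq.add("a")
	eq.add("b")
	eq.add("c") // at capacity: "a" is evicted and droppedCount becomes 1
	fmt.Println(eq.queue, eq.droppedCount) // [b c] 1
}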
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
new file mode 100644
index 0000000000000000000000000000000000000000..bba246041a44762c419fe677e16d210f065ccc5a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
@@ -0,0 +1,77 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	crand "crypto/rand"
+	"encoding/binary"
+	"math/rand"
+	"sync"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+// IDGenerator allows custom generators for TraceID and SpanID.
+type IDGenerator interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// NewIDs returns a new trace and span ID.
+	NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// NewSpanID returns an ID for a new span in the trace with traceID.
+	NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+type randomIDGenerator struct {
+	sync.Mutex
+	randSource *rand.Rand
+}
+
+var _ IDGenerator = &randomIDGenerator{}
+
+// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
+func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID {
+	gen.Lock()
+	defer gen.Unlock()
+	sid := trace.SpanID{}
+	_, _ = gen.randSource.Read(sid[:])
+	return sid
+}
+
+// NewIDs returns a non-zero trace ID and a non-zero span ID from a
+// randomly-chosen sequence.
+func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
+	gen.Lock()
+	defer gen.Unlock()
+	tid := trace.TraceID{}
+	_, _ = gen.randSource.Read(tid[:])
+	sid := trace.SpanID{}
+	_, _ = gen.randSource.Read(sid[:])
+	return tid, sid
+}
+
+func defaultIDGenerator() IDGenerator {
+	gen := &randomIDGenerator{}
+	var rngSeed int64
+	_ = binary.Read(crand.Reader, binary.LittleEndian, &rngSeed)
+	gen.randSource = rand.New(rand.NewSource(rngSeed))
+	return gen
+}
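
Because IDGenerator is an interface, a deterministic generator can replace the seeded math/rand default, e.g. in tests. A sketch of a sequential generator; sdktrace.WithIDGenerator is the standard SDK option for installing it, defined outside this hunk:

package main

import (
	"context"
	"encoding/binary"
	"sync/atomic"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/trace"
)

// seqIDGenerator hands out monotonically increasing, non-zero IDs.
type seqIDGenerator struct{ n uint64 }

var _ sdktrace.IDGenerator = (*seqIDGenerator)(nil)

func (g *seqIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
	var tid trace.TraceID
	binary.BigEndian.PutUint64(tid[8:], atomic.AddUint64(&g.n, 1))
	return tid, g.NewSpanID(ctx, tid)
}

func (g *seqIDGenerator) NewSpanID(_ context.Context, _ trace.TraceID) trace.SpanID {
	var sid trace.SpanID
	binary.BigEndian.PutUint64(sid[:], atomic.AddUint64(&g.n, 1))
	return sid
}

func main() {
	_ = sdktrace.NewTracerProvider(sdktrace.WithIDGenerator(&seqIDGenerator{}))
}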
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/link.go b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go
new file mode 100644
index 0000000000000000000000000000000000000000..19cfea4ba4544d63feca2a5b49c3332f6d691598
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go
@@ -0,0 +1,34 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+type Link struct {
+	// SpanContext of the linked Span.
+	SpanContext trace.SpanContext
+
+	// Attributes describe the aspects of the link.
+	Attributes []attribute.KeyValue
+
+	// DroppedAttributeCount is the number of attributes that were not
+	// recorded due to configured limits being reached.
+	DroppedAttributeCount int
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
new file mode 100644
index 0000000000000000000000000000000000000000..b1ac608464a0eae73e609ec2990a248f8dccb9cd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
@@ -0,0 +1,504 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+	"go.opentelemetry.io/otel/trace/noop"
+)
+
+const (
+	defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer"
+)
+
+// tracerProviderConfig.
+type tracerProviderConfig struct {
+	// processors contains the collection of SpanProcessors that form the
+	// processing pipeline for spans in the trace signal.
+	// SpanProcessors registered with a TracerProvider are called at the start
+	// and end of a Span's lifecycle, in the order they are registered.
+	processors []SpanProcessor
+
+	// sampler is the default sampler used when creating new spans.
+	sampler Sampler
+
+	// idGenerator is used to generate all Span and Trace IDs when needed.
+	idGenerator IDGenerator
+
+	// spanLimits defines the attribute, event, and link limits for spans.
+	spanLimits SpanLimits
+
+	// resource contains attributes representing an entity that produces telemetry.
+	resource *resource.Resource
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Provider.
+func (cfg tracerProviderConfig) MarshalLog() interface{} {
+	return struct {
+		SpanProcessors  []SpanProcessor
+		SamplerType     string
+		IDGeneratorType string
+		SpanLimits      SpanLimits
+		Resource        *resource.Resource
+	}{
+		SpanProcessors:  cfg.processors,
+		SamplerType:     fmt.Sprintf("%T", cfg.sampler),
+		IDGeneratorType: fmt.Sprintf("%T", cfg.idGenerator),
+		SpanLimits:      cfg.spanLimits,
+		Resource:        cfg.resource,
+	}
+}
+
+// TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to
+// instrumentation so it can trace operational flow through a system.
+type TracerProvider struct {
+	embedded.TracerProvider
+
+	mu             sync.Mutex
+	namedTracer    map[instrumentation.Scope]*tracer
+	spanProcessors atomic.Pointer[spanProcessorStates]
+
+	isShutdown atomic.Bool
+
+	// These fields are not protected by the lock mu. They are assumed to be
+	// immutable after creation of the TracerProvider.
+	sampler     Sampler
+	idGenerator IDGenerator
+	spanLimits  SpanLimits
+	resource    *resource.Resource
+}
+
+var _ trace.TracerProvider = &TracerProvider{}
+
+// NewTracerProvider returns a new and configured TracerProvider.
+//
+// By default the returned TracerProvider is configured with:
+//   - a ParentBased(AlwaysSample) Sampler
+//   - a random number IDGenerator
+//   - the resource.Default() Resource
+//   - the default SpanLimits.
+//
+// The passed opts are used to override these default values and configure the
+// returned TracerProvider appropriately.
+func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider {
+	o := tracerProviderConfig{
+		spanLimits: NewSpanLimits(),
+	}
+	o = applyTracerProviderEnvConfigs(o)
+
+	for _, opt := range opts {
+		o = opt.apply(o)
+	}
+
+	o = ensureValidTracerProviderConfig(o)
+
+	tp := &TracerProvider{
+		namedTracer: make(map[instrumentation.Scope]*tracer),
+		sampler:     o.sampler,
+		idGenerator: o.idGenerator,
+		spanLimits:  o.spanLimits,
+		resource:    o.resource,
+	}
+	global.Info("TracerProvider created", "config", o)
+
+	spss := make(spanProcessorStates, 0, len(o.processors))
+	for _, sp := range o.processors {
+		spss = append(spss, newSpanProcessorState(sp))
+	}
+	tp.spanProcessors.Store(&spss)
+
+	return tp
+}
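
NewTracerProvider folds the options into the config before freezing the immutable fields. A hedged construction sketch; WithResource, WithSampler, ParentBased, and TraceIDRatioBased are standard SDK options and samplers defined outside this hunk, and the service name is illustrative:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/sdk/resource"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	res := resource.NewWithAttributes(semconv.SchemaURL,
		semconv.ServiceName("flow-agent"), // illustrative service name
	)
	tp := sdktrace.NewTracerProvider(
		sdktrace.WithResource(res),
		// Sample 10% of root traces; respect the parent's decision otherwise.
		sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.1))),
	)
	defer tp.Shutdown(context.Background())

	otel.SetTracerProvider(tp)
	_ = tp.Tracer("example/instrumentation")
}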
+
+// Tracer returns a Tracer with the given name and options. If a Tracer for
+// the given name and options does not exist it is created, otherwise the
+// existing Tracer is returned.
+//
+// If name is empty, defaultTracerName is used instead.
+//
+// This method is safe to be called concurrently.
+func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+	// This check happens before the mutex is acquired to avoid deadlocking if Tracer() is called from within Shutdown().
+	if p.isShutdown.Load() {
+		return noop.NewTracerProvider().Tracer(name, opts...)
+	}
+	c := trace.NewTracerConfig(opts...)
+	if name == "" {
+		name = defaultTracerName
+	}
+	is := instrumentation.Scope{
+		Name:      name,
+		Version:   c.InstrumentationVersion(),
+		SchemaURL: c.SchemaURL(),
+	}
+
+	t, ok := func() (trace.Tracer, bool) {
+		p.mu.Lock()
+		defer p.mu.Unlock()
+		// Must check the flag after acquiring the mutex to avoid returning a valid tracer if Shutdown() ran
+		// after the first check above but before we acquired the mutex.
+		if p.isShutdown.Load() {
+			return noop.NewTracerProvider().Tracer(name, opts...), true
+		}
+		t, ok := p.namedTracer[is]
+		if !ok {
+			t = &tracer{
+				provider:             p,
+				instrumentationScope: is,
+			}
+			p.namedTracer[is] = t
+		}
+		return t, ok
+	}()
+	if !ok {
+		// This code is outside the mutex to not hold the lock while calling third party logging code:
+		// - That code may do slow things like I/O, which would prolong the duration the lock is held,
+		//   slowing down all tracing consumers.
+		// - Logging code may be instrumented with tracing and deadlock because it could try
+		//   acquiring the same non-reentrant mutex.
+		global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL)
+	}
+	return t
+}
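+
+// An illustrative sketch (not from upstream) of obtaining a Tracer and
+// starting a span, assuming `tp` and `ctx` exist in the caller:
+//
+//	tr := tp.Tracer("example.com/mylib", trace.WithInstrumentationVersion("v1.2.3"))
+//	ctx, span := tr.Start(ctx, "operation")
+//	defer span.End()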
+
+// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors.
+func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) {
+	// This check prevents calls during a shutdown.
+	if p.isShutdown.Load() {
+		return
+	}
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	// This check prevents calls after a shutdown.
+	if p.isShutdown.Load() {
+		return
+	}
+
+	current := p.getSpanProcessors()
+	newSPS := make(spanProcessorStates, 0, len(current)+1)
+	newSPS = append(newSPS, current...)
+	newSPS = append(newSPS, newSpanProcessorState(sp))
+	p.spanProcessors.Store(&newSPS)
+}
+
+// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors.
+func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) {
+	// This check prevents calls during a shutdown.
+	if p.isShutdown.Load() {
+		return
+	}
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	// This check prevents calls after a shutdown.
+	if p.isShutdown.Load() {
+		return
+	}
+	old := p.getSpanProcessors()
+	if len(old) == 0 {
+		return
+	}
+	spss := make(spanProcessorStates, len(old))
+	copy(spss, old)
+
+	// stop the span processor if it is started and remove it from the list
+	var stopOnce *spanProcessorState
+	var idx int
+	for i, sps := range spss {
+		if sps.sp == sp {
+			stopOnce = sps
+			idx = i
+		}
+	}
+	if stopOnce != nil {
+		stopOnce.state.Do(func() {
+			if err := sp.Shutdown(context.Background()); err != nil {
+				otel.Handle(err)
+			}
+		})
+	}
+	if len(spss) > 1 {
+		copy(spss[idx:], spss[idx+1:])
+	}
+	spss[len(spss)-1] = nil
+	spss = spss[:len(spss)-1]
+
+	p.spanProcessors.Store(&spss)
+}
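+
+// An illustrative sketch (not from upstream) of attaching and detaching a
+// processor at runtime, assuming `debugExp` is a SpanExporter used only for
+// debugging:
+//
+//	sp := NewSimpleSpanProcessor(debugExp)
+//	tp.RegisterSpanProcessor(sp)
+//	// ...
+//	tp.UnregisterSpanProcessor(sp) // also shuts sp down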
+
+// ForceFlush immediately exports all spans that have not yet been exported for
+// all the registered span processors.
+func (p *TracerProvider) ForceFlush(ctx context.Context) error {
+	spss := p.getSpanProcessors()
+	if len(spss) == 0 {
+		return nil
+	}
+
+	for _, sps := range spss {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		if err := sps.sp.ForceFlush(ctx); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Shutdown shuts down TracerProvider. All registered span processors are shut down
+// in the order they were registered and any held computational resources are released.
+// After Shutdown is called, all methods are no-ops.
+func (p *TracerProvider) Shutdown(ctx context.Context) error {
+	// This check prevents deadlocks in case of recursive shutdown.
+	if p.isShutdown.Load() {
+		return nil
+	}
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	// This check prevents calls after a shutdown has already been done concurrently.
+	if !p.isShutdown.CompareAndSwap(false, true) { // did toggle?
+		return nil
+	}
+
+	var retErr error
+	for _, sps := range p.getSpanProcessors() {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		var err error
+		sps.state.Do(func() {
+			err = sps.sp.Shutdown(ctx)
+		})
+		if err != nil {
+			if retErr == nil {
+				retErr = err
+			} else {
+				// Poor man's list of errors
+				retErr = fmt.Errorf("%v; %v", retErr, err)
+			}
+		}
+	}
+	p.spanProcessors.Store(&spanProcessorStates{})
+	return retErr
+}
+
+func (p *TracerProvider) getSpanProcessors() spanProcessorStates {
+	return *(p.spanProcessors.Load())
+}
+
+// TracerProviderOption configures a TracerProvider.
+type TracerProviderOption interface {
+	apply(tracerProviderConfig) tracerProviderConfig
+}
+
+type traceProviderOptionFunc func(tracerProviderConfig) tracerProviderConfig
+
+func (fn traceProviderOptionFunc) apply(cfg tracerProviderConfig) tracerProviderConfig {
+	return fn(cfg)
+}
+
+// WithSyncer registers the exporter with the TracerProvider using a
+// SimpleSpanProcessor.
+//
+// This is not recommended for production use. The synchronous nature of the
+// SimpleSpanProcessor that will wrap the exporter makes it good for testing,
+// debugging, or showing examples of other features, but it will be slow and
+// have a high computational resource usage overhead. The WithBatcher option
+// is recommended for production use instead.
+func WithSyncer(e SpanExporter) TracerProviderOption {
+	return WithSpanProcessor(NewSimpleSpanProcessor(e))
+}
+
+// WithBatcher registers the exporter with the TracerProvider using a
+// BatchSpanProcessor configured with the passed opts.
+func WithBatcher(e SpanExporter, opts ...BatchSpanProcessorOption) TracerProviderOption {
+	return WithSpanProcessor(NewBatchSpanProcessor(e, opts...))
+}
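+
+// An illustrative sketch (not from upstream) of tuning the wrapped
+// BatchSpanProcessor through its options, assuming `exp` is a SpanExporter:
+//
+//	tp := NewTracerProvider(WithBatcher(exp,
+//		WithBatchTimeout(5*time.Second),
+//		WithMaxExportBatchSize(512),
+//	))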
+
+// WithSpanProcessor registers the SpanProcessor with a TracerProvider.
+func WithSpanProcessor(sp SpanProcessor) TracerProviderOption {
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		cfg.processors = append(cfg.processors, sp)
+		return cfg
+	})
+}
+
+// WithResource returns a TracerProviderOption that will configure the
+// Resource r as a TracerProvider's Resource. The configured Resource is
+// referenced by all the Tracers the TracerProvider creates. It represents the
+// entity producing telemetry.
+//
+// If this option is not used, the TracerProvider will use the
+// resource.Default() Resource by default.
+func WithResource(r *resource.Resource) TracerProviderOption {
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		var err error
+		cfg.resource, err = resource.Merge(resource.Environment(), r)
+		if err != nil {
+			otel.Handle(err)
+		}
+		return cfg
+	})
+}
+
+// WithIDGenerator returns a TracerProviderOption that will configure the
+// IDGenerator g as a TracerProvider's IDGenerator. The configured IDGenerator
+// is used by the Tracers the TracerProvider creates to generate new Span and
+// Trace IDs.
+//
+// If this option is not used, the TracerProvider will use a random number
+// IDGenerator by default.
+func WithIDGenerator(g IDGenerator) TracerProviderOption {
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		if g != nil {
+			cfg.idGenerator = g
+		}
+		return cfg
+	})
+}
+
+// WithSampler returns a TracerProviderOption that will configure the Sampler
+// s as a TracerProvider's Sampler. The configured Sampler is used by the
+// Tracers the TracerProvider creates to make their sampling decisions for the
+// Spans they create.
+//
+// This option overrides the Sampler configured through the OTEL_TRACES_SAMPLER
+// and OTEL_TRACES_SAMPLER_ARG environment variables. If this option is not used
+// and the sampler is not configured through environment variables or the environment
+// contains invalid/unsupported configuration, the TracerProvider will use a
+// ParentBased(AlwaysSample) Sampler by default.
+func WithSampler(s Sampler) TracerProviderOption {
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		if s != nil {
+			cfg.sampler = s
+		}
+		return cfg
+	})
+}
+
+// WithSpanLimits returns a TracerProviderOption that configures a
+// TracerProvider to use the SpanLimits sl. These SpanLimits bound any Span
+// created by a Tracer from the TracerProvider.
+//
+// If any field of sl is zero or negative it will be replaced with the default
+// value for that field.
+//
+// If this or WithRawSpanLimits are not provided, the TracerProvider will use
+// the limits defined by environment variables, or the defaults if unset.
+// Refer to the NewSpanLimits documentation for information about this
+// relationship.
+//
+// Deprecated: Use WithRawSpanLimits instead which allows setting unlimited
+// and zero limits. This option will be kept until the next major version
+// incremented release.
+func WithSpanLimits(sl SpanLimits) TracerProviderOption {
+	if sl.AttributeValueLengthLimit <= 0 {
+		sl.AttributeValueLengthLimit = DefaultAttributeValueLengthLimit
+	}
+	if sl.AttributeCountLimit <= 0 {
+		sl.AttributeCountLimit = DefaultAttributeCountLimit
+	}
+	if sl.EventCountLimit <= 0 {
+		sl.EventCountLimit = DefaultEventCountLimit
+	}
+	if sl.AttributePerEventCountLimit <= 0 {
+		sl.AttributePerEventCountLimit = DefaultAttributePerEventCountLimit
+	}
+	if sl.LinkCountLimit <= 0 {
+		sl.LinkCountLimit = DefaultLinkCountLimit
+	}
+	if sl.AttributePerLinkCountLimit <= 0 {
+		sl.AttributePerLinkCountLimit = DefaultAttributePerLinkCountLimit
+	}
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		cfg.spanLimits = sl
+		return cfg
+	})
+}
+
+// WithRawSpanLimits returns a TracerProviderOption that configures a
+// TracerProvider to use these limits. These limits bound any Span created by
+// a Tracer from the TracerProvider.
+//
+// The limits will be used as-is. Zero or negative values will not be changed
+// to the default value like WithSpanLimits does. Setting a limit to zero will
+// effectively disable the related resource it limits and setting to a
+// negative value will mean that resource is unlimited. Consequently, this
+// means that the zero-value SpanLimits will disable all span resources.
+// Because of this, limits should be constructed using NewSpanLimits and
+// updated accordingly.
+//
+// If this or WithSpanLimits are not provided, the TracerProvider will use the
+// limits defined by environment variables, or the defaults if unset. Refer to
+// the NewSpanLimits documentation for information about this relationship.
+func WithRawSpanLimits(limits SpanLimits) TracerProviderOption {
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		cfg.spanLimits = limits
+		return cfg
+	})
+}
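+
+// An illustrative sketch following the advice above: start from NewSpanLimits
+// and adjust a single field before applying the result verbatim:
+//
+//	limits := NewSpanLimits()
+//	limits.AttributeValueLengthLimit = -1 // unlimited value length
+//	tp := NewTracerProvider(WithRawSpanLimits(limits))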
+
+func applyTracerProviderEnvConfigs(cfg tracerProviderConfig) tracerProviderConfig {
+	for _, opt := range tracerProviderOptionsFromEnv() {
+		cfg = opt.apply(cfg)
+	}
+
+	return cfg
+}
+
+func tracerProviderOptionsFromEnv() []TracerProviderOption {
+	var opts []TracerProviderOption
+
+	sampler, err := samplerFromEnv()
+	if err != nil {
+		otel.Handle(err)
+	}
+
+	if sampler != nil {
+		opts = append(opts, WithSampler(sampler))
+	}
+
+	return opts
+}
+
+// ensureValidTracerProviderConfig ensures that the given tracerProviderConfig is valid.
+func ensureValidTracerProviderConfig(cfg tracerProviderConfig) tracerProviderConfig {
+	if cfg.sampler == nil {
+		cfg.sampler = ParentBased(AlwaysSample())
+	}
+	if cfg.idGenerator == nil {
+		cfg.idGenerator = defaultIDGenerator()
+	}
+	if cfg.resource == nil {
+		cfg.resource = resource.Default()
+	}
+	return cfg
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go
new file mode 100644
index 0000000000000000000000000000000000000000..02053b318aeb4a499ff44710e1357901a41e8e2c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go
@@ -0,0 +1,108 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+const (
+	tracesSamplerKey    = "OTEL_TRACES_SAMPLER"
+	tracesSamplerArgKey = "OTEL_TRACES_SAMPLER_ARG"
+
+	samplerAlwaysOn                = "always_on"
+	samplerAlwaysOff               = "always_off"
+	samplerTraceIDRatio            = "traceidratio"
+	samplerParentBasedAlwaysOn     = "parentbased_always_on"
+	samplerParentBasedAlwaysOff    = "parentbased_always_off"
+	samplerParentBasedTraceIDRatio = "parentbased_traceidratio"
+)
+
+type errUnsupportedSampler string
+
+func (e errUnsupportedSampler) Error() string {
+	return fmt.Sprintf("unsupported sampler: %s", string(e))
+}
+
+var (
+	errNegativeTraceIDRatio       = errors.New("invalid trace ID ratio: less than 0.0")
+	errGreaterThanOneTraceIDRatio = errors.New("invalid trace ID ratio: greater than 1.0")
+)
+
+type samplerArgParseError struct {
+	parseErr error
+}
+
+func (e samplerArgParseError) Error() string {
+	return fmt.Sprintf("parsing sampler argument: %s", e.parseErr.Error())
+}
+
+func (e samplerArgParseError) Unwrap() error {
+	return e.parseErr
+}
+
+func samplerFromEnv() (Sampler, error) {
+	sampler, ok := os.LookupEnv(tracesSamplerKey)
+	if !ok {
+		return nil, nil
+	}
+
+	sampler = strings.ToLower(strings.TrimSpace(sampler))
+	samplerArg, hasSamplerArg := os.LookupEnv(tracesSamplerArgKey)
+	samplerArg = strings.TrimSpace(samplerArg)
+
+	switch sampler {
+	case samplerAlwaysOn:
+		return AlwaysSample(), nil
+	case samplerAlwaysOff:
+		return NeverSample(), nil
+	case samplerTraceIDRatio:
+		if !hasSamplerArg {
+			return TraceIDRatioBased(1.0), nil
+		}
+		return parseTraceIDRatio(samplerArg)
+	case samplerParentBasedAlwaysOn:
+		return ParentBased(AlwaysSample()), nil
+	case samplerParentBasedAlwaysOff:
+		return ParentBased(NeverSample()), nil
+	case samplerParentBasedTraceIDRatio:
+		if !hasSamplerArg {
+			return ParentBased(TraceIDRatioBased(1.0)), nil
+		}
+		ratio, err := parseTraceIDRatio(samplerArg)
+		return ParentBased(ratio), err
+	default:
+		return nil, errUnsupportedSampler(sampler)
+	}
+}
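+
+// For illustration, the equivalent environment configuration read by
+// samplerFromEnv, here selecting a 25% parent-based ratio sampler:
+//
+//	OTEL_TRACES_SAMPLER=parentbased_traceidratio
+//	OTEL_TRACES_SAMPLER_ARG=0.25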
+
+func parseTraceIDRatio(arg string) (Sampler, error) {
+	v, err := strconv.ParseFloat(arg, 64)
+	if err != nil {
+		return TraceIDRatioBased(1.0), samplerArgParseError{err}
+	}
+	if v < 0.0 {
+		return TraceIDRatioBased(1.0), errNegativeTraceIDRatio
+	}
+	if v > 1.0 {
+		return TraceIDRatioBased(1.0), errGreaterThanOneTraceIDRatio
+	}
+
+	return TraceIDRatioBased(v), nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
new file mode 100644
index 0000000000000000000000000000000000000000..a7bc125b9e8a3aad7c261d27d8c319c20f90e440
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
@@ -0,0 +1,293 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"encoding/binary"
+	"fmt"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Sampler decides whether a trace should be sampled and exported.
+type Sampler interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// ShouldSample returns a SamplingResult based on a decision made from the
+	// passed parameters.
+	ShouldSample(parameters SamplingParameters) SamplingResult
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Description returns information describing the Sampler.
+	Description() string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// SamplingParameters contains the values passed to a Sampler.
+type SamplingParameters struct {
+	ParentContext context.Context
+	TraceID       trace.TraceID
+	Name          string
+	Kind          trace.SpanKind
+	Attributes    []attribute.KeyValue
+	Links         []trace.Link
+}
+
+// SamplingDecision indicates whether a span is dropped, recorded and/or sampled.
+type SamplingDecision uint8
+
+// Valid sampling decisions.
+const (
+	// Drop will not record the span and all attributes/events will be dropped.
+	Drop SamplingDecision = iota
+
+	// RecordOnly indicates the span's `IsRecording() == true`, but the
+	// `Sampled` flag *must not* be set.
+	RecordOnly
+
+	// RecordAndSample indicates the span's `IsRecording() == true` and the
+	// `Sampled` flag *must* be set.
+	RecordAndSample
+)
+
+// SamplingResult conveys a SamplingDecision, set of Attributes and a Tracestate.
+type SamplingResult struct {
+	Decision   SamplingDecision
+	Attributes []attribute.KeyValue
+	Tracestate trace.TraceState
+}
+
+type traceIDRatioSampler struct {
+	traceIDUpperBound uint64
+	description       string
+}
+
+func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult {
+	psc := trace.SpanContextFromContext(p.ParentContext)
+	x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1
+	if x < ts.traceIDUpperBound {
+		return SamplingResult{
+			Decision:   RecordAndSample,
+			Tracestate: psc.TraceState(),
+		}
+	}
+	return SamplingResult{
+		Decision:   Drop,
+		Tracestate: psc.TraceState(),
+	}
+}
+
+func (ts traceIDRatioSampler) Description() string {
+	return ts.description
+}
+
+// TraceIDRatioBased samples a given fraction of traces. Fractions >= 1 will
+// always sample. Fractions < 0 are treated as zero. To respect the
+// parent trace's `SampledFlag`, the `TraceIDRatioBased` sampler should be used
+// as a delegate of a `Parent` sampler.
+//
+//nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased`
+func TraceIDRatioBased(fraction float64) Sampler {
+	if fraction >= 1 {
+		return AlwaysSample()
+	}
+
+	if fraction <= 0 {
+		fraction = 0
+	}
+
+	return &traceIDRatioSampler{
+		traceIDUpperBound: uint64(fraction * (1 << 63)),
+		description:       fmt.Sprintf("TraceIDRatioBased{%g}", fraction),
+	}
+}
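+
+// An illustrative sketch: the decision depends only on the trace ID, so every
+// span of a given trace gets the same verdict (`tid` is an assumed
+// trace.TraceID value):
+//
+//	s := TraceIDRatioBased(0.5)
+//	res := s.ShouldSample(SamplingParameters{TraceID: tid})
+//	sampled := res.Decision == RecordAndSample // stable for this tid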
+
+type alwaysOnSampler struct{}
+
+func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult {
+	return SamplingResult{
+		Decision:   RecordAndSample,
+		Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
+	}
+}
+
+func (as alwaysOnSampler) Description() string {
+	return "AlwaysOnSampler"
+}
+
+// AlwaysSample returns a Sampler that samples every trace.
+// Be careful about using this sampler in a production application with
+// significant traffic: a new trace will be started and exported for every
+// request.
+func AlwaysSample() Sampler {
+	return alwaysOnSampler{}
+}
+
+type alwaysOffSampler struct{}
+
+func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult {
+	return SamplingResult{
+		Decision:   Drop,
+		Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
+	}
+}
+
+func (as alwaysOffSampler) Description() string {
+	return "AlwaysOffSampler"
+}
+
+// NeverSample returns a Sampler that samples no traces.
+func NeverSample() Sampler {
+	return alwaysOffSampler{}
+}
+
+// ParentBased returns a sampler decorator which behaves differently,
+// based on the parent of the span. If the span has no parent,
+// the decorated sampler is used to make the sampling decision. If the span has
+// a parent, depending on whether the parent is remote and whether it
+// is sampled, one of the following samplers will apply:
+//   - remoteParentSampled(Sampler) (default: AlwaysOn)
+//   - remoteParentNotSampled(Sampler) (default: AlwaysOff)
+//   - localParentSampled(Sampler) (default: AlwaysOn)
+//   - localParentNotSampled(Sampler) (default: AlwaysOff)
+func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler {
+	return parentBased{
+		root:   root,
+		config: configureSamplersForParentBased(samplers),
+	}
+}
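+
+// An illustrative sketch sampling locally rooted traces at 10% while
+// restating the remote-parent defaults explicitly:
+//
+//	s := ParentBased(
+//		TraceIDRatioBased(0.1),
+//		WithRemoteParentSampled(AlwaysSample()),
+//		WithRemoteParentNotSampled(NeverSample()),
+//	)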
+
+type parentBased struct {
+	root   Sampler
+	config samplerConfig
+}
+
+func configureSamplersForParentBased(samplers []ParentBasedSamplerOption) samplerConfig {
+	c := samplerConfig{
+		remoteParentSampled:    AlwaysSample(),
+		remoteParentNotSampled: NeverSample(),
+		localParentSampled:     AlwaysSample(),
+		localParentNotSampled:  NeverSample(),
+	}
+
+	for _, so := range samplers {
+		c = so.apply(c)
+	}
+
+	return c
+}
+
+// samplerConfig is a group of options for the parentBased sampler.
+type samplerConfig struct {
+	remoteParentSampled, remoteParentNotSampled Sampler
+	localParentSampled, localParentNotSampled   Sampler
+}
+
+// ParentBasedSamplerOption configures the sampler for a particular sampling case.
+type ParentBasedSamplerOption interface {
+	apply(samplerConfig) samplerConfig
+}
+
+// WithRemoteParentSampled sets the sampler for the case of sampled remote parent.
+func WithRemoteParentSampled(s Sampler) ParentBasedSamplerOption {
+	return remoteParentSampledOption{s}
+}
+
+type remoteParentSampledOption struct {
+	s Sampler
+}
+
+func (o remoteParentSampledOption) apply(config samplerConfig) samplerConfig {
+	config.remoteParentSampled = o.s
+	return config
+}
+
+// WithRemoteParentNotSampled sets the sampler for the case of remote parent
+// which is not sampled.
+func WithRemoteParentNotSampled(s Sampler) ParentBasedSamplerOption {
+	return remoteParentNotSampledOption{s}
+}
+
+type remoteParentNotSampledOption struct {
+	s Sampler
+}
+
+func (o remoteParentNotSampledOption) apply(config samplerConfig) samplerConfig {
+	config.remoteParentNotSampled = o.s
+	return config
+}
+
+// WithLocalParentSampled sets the sampler for the case of sampled local parent.
+func WithLocalParentSampled(s Sampler) ParentBasedSamplerOption {
+	return localParentSampledOption{s}
+}
+
+type localParentSampledOption struct {
+	s Sampler
+}
+
+func (o localParentSampledOption) apply(config samplerConfig) samplerConfig {
+	config.localParentSampled = o.s
+	return config
+}
+
+// WithLocalParentNotSampled sets the sampler for the case of local parent
+// which is not sampled.
+func WithLocalParentNotSampled(s Sampler) ParentBasedSamplerOption {
+	return localParentNotSampledOption{s}
+}
+
+type localParentNotSampledOption struct {
+	s Sampler
+}
+
+func (o localParentNotSampledOption) apply(config samplerConfig) samplerConfig {
+	config.localParentNotSampled = o.s
+	return config
+}
+
+func (pb parentBased) ShouldSample(p SamplingParameters) SamplingResult {
+	psc := trace.SpanContextFromContext(p.ParentContext)
+	if psc.IsValid() {
+		if psc.IsRemote() {
+			if psc.IsSampled() {
+				return pb.config.remoteParentSampled.ShouldSample(p)
+			}
+			return pb.config.remoteParentNotSampled.ShouldSample(p)
+		}
+
+		if psc.IsSampled() {
+			return pb.config.localParentSampled.ShouldSample(p)
+		}
+		return pb.config.localParentNotSampled.ShouldSample(p)
+	}
+	return pb.root.ShouldSample(p)
+}
+
+func (pb parentBased) Description() string {
+	return fmt.Sprintf("ParentBased{root:%s,remoteParentSampled:%s,"+
+		"remoteParentNotSampled:%s,localParentSampled:%s,localParentNotSampled:%s}",
+		pb.root.Description(),
+		pb.config.remoteParentSampled.Description(),
+		pb.config.remoteParentNotSampled.Description(),
+		pb.config.localParentSampled.Description(),
+		pb.config.localParentNotSampled.Description(),
+	)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
new file mode 100644
index 0000000000000000000000000000000000000000..f8770fff79bb86d046f4e3c063d1991ddab496b5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
@@ -0,0 +1,131 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+// simpleSpanProcessor is a SpanProcessor that synchronously sends all
+// completed Spans to a SpanExporter immediately.
+type simpleSpanProcessor struct {
+	exporterMu sync.Mutex
+	exporter   SpanExporter
+	stopOnce   sync.Once
+}
+
+var _ SpanProcessor = (*simpleSpanProcessor)(nil)
+
+// NewSimpleSpanProcessor returns a new SpanProcessor that will synchronously
+// send completed spans to the exporter immediately.
+//
+// This SpanProcessor is not recommended for production use. The synchronous
+// nature of this SpanProcessor makes it good for testing, debugging, or
+// showing examples of other features, but it will be slow and have a high
+// computational resource usage overhead. The BatchSpanProcessor is
+// recommended for production use instead.
+func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor {
+	ssp := &simpleSpanProcessor{
+		exporter: exporter,
+	}
+	global.Warn("SimpleSpanProcessor is not recommended for production use, consider using BatchSpanProcessor instead.")
+
+	return ssp
+}
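+
+// An illustrative test-setup sketch, assuming the stdouttrace exporter
+// package (go.opentelemetry.io/otel/exporters/stdout/stdouttrace) is
+// available in the build:
+//
+//	exp, err := stdouttrace.New()
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	tp := NewTracerProvider(WithSpanProcessor(NewSimpleSpanProcessor(exp)))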
+
+// OnStart does nothing.
+func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
+
+// OnEnd immediately exports a ReadOnlySpan.
+func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) {
+	ssp.exporterMu.Lock()
+	defer ssp.exporterMu.Unlock()
+
+	if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() {
+		if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil {
+			otel.Handle(err)
+		}
+	}
+}
+
+// Shutdown shuts down the exporter this SimpleSpanProcessor exports to.
+func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error {
+	var err error
+	ssp.stopOnce.Do(func() {
+		stopFunc := func(exp SpanExporter) (<-chan error, func()) {
+			done := make(chan error)
+			return done, func() { done <- exp.Shutdown(ctx) }
+		}
+
+		// The exporter field of the simpleSpanProcessor needs to be zeroed to
+		// signal it is shut down, meaning all subsequent calls to OnEnd will
+		// be gracefully ignored. This needs to be done synchronously to avoid
+		// any race condition.
+		//
+	// A closure is used to keep a reference to the exporter and then the
+	// field is zeroed. This ensures the simpleSpanProcessor is shut down
+	// before the exporter. This order is important as it avoids a
+	// potential deadlock. If the exporter's shutdown operation generates a
+	// span, that span would need to be exported, meaning OnEnd would be
+	// called and would try to acquire the lock that is held here.
+		ssp.exporterMu.Lock()
+		done, shutdown := stopFunc(ssp.exporter)
+		ssp.exporter = nil
+		ssp.exporterMu.Unlock()
+
+		go shutdown()
+
+		// Wait for the exporter to shut down or the deadline to expire.
+		select {
+		case err = <-done:
+		case <-ctx.Done():
+			// It is possible for the exporter to have immediately shut down
+			// and the context to be done simultaneously. In that case this
+			// outer select statement will randomly choose a case. This will
+			// result in a different returned error for similar scenarios.
+			// Instead, double check if the exporter shut down at the same
+			// time and return that error if so. This will ensure consistency
+			// as well as ensure the caller knows the exporter shut down
+			// successfully (they can already determine if the deadline is
+			// expired given they passed the context).
+			select {
+			case err = <-done:
+			default:
+				err = ctx.Err()
+			}
+		}
+	})
+	return err
+}
+
+// ForceFlush does nothing as there is no data to flush.
+func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error {
+	return nil
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Span Processor.
+func (ssp *simpleSpanProcessor) MarshalLog() interface{} {
+	return struct {
+		Type     string
+		Exporter SpanExporter
+	}{
+		Type:     "SimpleSpanProcessor",
+		Exporter: ssp.exporter,
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
new file mode 100644
index 0000000000000000000000000000000000000000..0349b2f198e4d583079d4b6a6233fcb0ba5b95ab
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
@@ -0,0 +1,144 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// snapshot is a record of a span's state at a particular checkpointed time.
+// It is used as a read-only representation of that state.
+type snapshot struct {
+	name                  string
+	spanContext           trace.SpanContext
+	parent                trace.SpanContext
+	spanKind              trace.SpanKind
+	startTime             time.Time
+	endTime               time.Time
+	attributes            []attribute.KeyValue
+	events                []Event
+	links                 []Link
+	status                Status
+	childSpanCount        int
+	droppedAttributeCount int
+	droppedEventCount     int
+	droppedLinkCount      int
+	resource              *resource.Resource
+	instrumentationScope  instrumentation.Scope
+}
+
+var _ ReadOnlySpan = snapshot{}
+
+func (s snapshot) private() {}
+
+// Name returns the name of the span.
+func (s snapshot) Name() string {
+	return s.name
+}
+
+// SpanContext returns the unique SpanContext that identifies the span.
+func (s snapshot) SpanContext() trace.SpanContext {
+	return s.spanContext
+}
+
+// Parent returns the unique SpanContext that identifies the parent of the
+// span if one exists. If the span has no parent the returned SpanContext
+// will be invalid.
+func (s snapshot) Parent() trace.SpanContext {
+	return s.parent
+}
+
+// SpanKind returns the role the span plays in a Trace.
+func (s snapshot) SpanKind() trace.SpanKind {
+	return s.spanKind
+}
+
+// StartTime returns the time the span started recording.
+func (s snapshot) StartTime() time.Time {
+	return s.startTime
+}
+
+// EndTime returns the time the span stopped recording. It will be zero if
+// the span has not ended.
+func (s snapshot) EndTime() time.Time {
+	return s.endTime
+}
+
+// Attributes returns the defining attributes of the span.
+func (s snapshot) Attributes() []attribute.KeyValue {
+	return s.attributes
+}
+
+// Links returns all the links the span has to other spans.
+func (s snapshot) Links() []Link {
+	return s.links
+}
+
+// Events returns all the events that occurred within the span's
+// lifetime.
+func (s snapshot) Events() []Event {
+	return s.events
+}
+
+// Status returns the span's status.
+func (s snapshot) Status() Status {
+	return s.status
+}
+
+// InstrumentationScope returns information about the instrumentation
+// scope that created the span.
+func (s snapshot) InstrumentationScope() instrumentation.Scope {
+	return s.instrumentationScope
+}
+
+// InstrumentationLibrary returns information about the instrumentation
+// library that created the span.
+func (s snapshot) InstrumentationLibrary() instrumentation.Library {
+	return s.instrumentationScope
+}
+
+// Resource returns information about the entity that produced the span.
+func (s snapshot) Resource() *resource.Resource {
+	return s.resource
+}
+
+// DroppedAttributes returns the number of attributes dropped by the span
+// due to limits being reached.
+func (s snapshot) DroppedAttributes() int {
+	return s.droppedAttributeCount
+}
+
+// DroppedLinks returns the number of links dropped by the span due to limits
+// being reached.
+func (s snapshot) DroppedLinks() int {
+	return s.droppedLinkCount
+}
+
+// DroppedEvents returns the number of events dropped by the span due to
+// limits being reached.
+func (s snapshot) DroppedEvents() int {
+	return s.droppedEventCount
+}
+
+// ChildSpanCount returns the count of spans that consider the span a
+// direct parent.
+func (s snapshot) ChildSpanCount() int {
+	return s.childSpanCount
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
new file mode 100644
index 0000000000000000000000000000000000000000..85bc702a019b571735e94095de8b9d5924cd8042
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
@@ -0,0 +1,852 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"runtime"
+	rt "runtime/trace"
+	"strings"
+	"sync"
+	"time"
+	"unicode/utf8"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/internal"
+	"go.opentelemetry.io/otel/sdk/resource"
+	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+// ReadOnlySpan allows reading information from the data structure underlying a
+// trace.Span. It is used in places where reading information from a span is
+// necessary but changing the span isn't necessary or allowed.
+//
+// Warning: methods may be added to this interface in minor releases.
+type ReadOnlySpan interface {
+	// Name returns the name of the span.
+	Name() string
+	// SpanContext returns the unique SpanContext that identifies the span.
+	SpanContext() trace.SpanContext
+	// Parent returns the unique SpanContext that identifies the parent of the
+	// span if one exists. If the span has no parent the returned SpanContext
+	// will be invalid.
+	Parent() trace.SpanContext
+	// SpanKind returns the role the span plays in a Trace.
+	SpanKind() trace.SpanKind
+	// StartTime returns the time the span started recording.
+	StartTime() time.Time
+	// EndTime returns the time the span stopped recording. It will be zero if
+	// the span has not ended.
+	EndTime() time.Time
+	// Attributes returns the defining attributes of the span.
+	// The order of the returned attributes is not guaranteed to be stable across invocations.
+	Attributes() []attribute.KeyValue
+	// Links returns all the links the span has to other spans.
+	Links() []Link
+	// Events returns all the events that occurred within the span's
+	// lifetime.
+	Events() []Event
+	// Status returns the span's status.
+	Status() Status
+	// InstrumentationScope returns information about the instrumentation
+	// scope that created the span.
+	InstrumentationScope() instrumentation.Scope
+	// InstrumentationLibrary returns information about the instrumentation
+	// library that created the span.
+	// Deprecated: please use InstrumentationScope instead.
+	InstrumentationLibrary() instrumentation.Library
+	// Resource returns information about the entity that produced the span.
+	Resource() *resource.Resource
+	// DroppedAttributes returns the number of attributes dropped by the span
+	// due to limits being reached.
+	DroppedAttributes() int
+	// DroppedLinks returns the number of links dropped by the span due to
+	// limits being reached.
+	DroppedLinks() int
+	// DroppedEvents returns the number of events dropped by the span due to
+	// limits being reached.
+	DroppedEvents() int
+	// ChildSpanCount returns the count of spans that consider the span a
+	// direct parent.
+	ChildSpanCount() int
+
+	// A private method to prevent users from implementing the
+	// interface, so that future additions to it will not
+	// violate compatibility.
+	private()
+}
+
+// ReadWriteSpan exposes the same methods as trace.Span and in addition allows
+// reading information from the underlying data structure.
+// This interface exposes the union of the methods of trace.Span (which is a
+// "write-only" span) and ReadOnlySpan. New methods for writing or reading span
+// information should be added under trace.Span or ReadOnlySpan, respectively.
+//
+// Warning: methods may be added to this interface in minor releases.
+type ReadWriteSpan interface {
+	trace.Span
+	ReadOnlySpan
+}
+
+// recordingSpan is an implementation of the OpenTelemetry Span API
+// representing the individual component of a trace that is sampled.
+type recordingSpan struct {
+	embedded.Span
+
+	// mu protects the contents of this span.
+	mu sync.Mutex
+
+	// parent holds the parent span of this span as a trace.SpanContext.
+	parent trace.SpanContext
+
+	// spanKind represents the kind of this span as a trace.SpanKind.
+	spanKind trace.SpanKind
+
+	// name is the name of this span.
+	name string
+
+	// startTime is the time at which this span was started.
+	startTime time.Time
+
+	// endTime is the time at which this span was ended. It contains the zero
+	// value of time.Time until the span is ended.
+	endTime time.Time
+
+	// status is the status of this span.
+	status Status
+
+	// childSpanCount holds the number of child spans created for this span.
+	childSpanCount int
+
+	// spanContext holds the SpanContext of this span.
+	spanContext trace.SpanContext
+
+	// attributes is a collection of user-provided key/values. The collection
+	// is constrained by a configurable maximum held by the parent
+	// TracerProvider. Attributes added after this maximum is reached are
+	// dropped, and the number of dropped attributes is tracked and reported
+	// in the ReadOnlySpan exported when the span ends.
+	attributes        []attribute.KeyValue
+	droppedAttributes int
+
+	// events are stored in FIFO queue capped by configured limit.
+	events evictedQueue
+
+	// links are stored in FIFO queue capped by configured limit.
+	links evictedQueue
+
+	// executionTracerTaskEnd ends the execution tracer span.
+	executionTracerTaskEnd func()
+
+	// tracer is the SDK tracer that created this span.
+	tracer *tracer
+}
+
+var (
+	_ ReadWriteSpan = (*recordingSpan)(nil)
+	_ runtimeTracer = (*recordingSpan)(nil)
+)
+
+// SpanContext returns the SpanContext of this span.
+func (s *recordingSpan) SpanContext() trace.SpanContext {
+	if s == nil {
+		return trace.SpanContext{}
+	}
+	return s.spanContext
+}
+
+// IsRecording returns if this span is being recorded. If this span has ended
+// this will return false.
+func (s *recordingSpan) IsRecording() bool {
+	if s == nil {
+		return false
+	}
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	return s.endTime.IsZero()
+}
+
+// SetStatus sets the status of the Span in the form of a code and a
+// description, overriding previously set values. The description is only
+// included in the set status when the code is for an error. If this span is
+// not being recorded, then this method does nothing.
+func (s *recordingSpan) SetStatus(code codes.Code, description string) {
+	if !s.IsRecording() {
+		return
+	}
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.status.Code > code {
+		return
+	}
+
+	status := Status{Code: code}
+	if code == codes.Error {
+		status.Description = description
+	}
+
+	s.status = status
+}
+
+// ensureAttributesCapacity inlines functionality from slices.Grow
+// so that we can avoid needing to import golang.org/x/exp for go1.20.
+// Once support for go1.20 is dropped, we can use slices.Grow available since go1.21 instead.
+// Tracking issue: https://github.com/open-telemetry/opentelemetry-go/issues/4819.
+func (s *recordingSpan) ensureAttributesCapacity(minCapacity int) {
+	if n := minCapacity - cap(s.attributes); n > 0 {
+		s.attributes = append(s.attributes[:cap(s.attributes)], make([]attribute.KeyValue, n)...)[:len(s.attributes)]
+	}
+}
+
+// SetAttributes sets attributes of this span.
+//
+// If a key from attributes already exists, the value associated with that key
+// will be overwritten with the value contained in attributes.
+//
+// If this span is not being recorded, then this method does nothing.
+//
+// If adding attributes to the span would exceed the maximum amount of
+// attributes the span is configured to have, the last added attributes will
+// be dropped.
+func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
+	if !s.IsRecording() {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	limit := s.tracer.provider.spanLimits.AttributeCountLimit
+	if limit == 0 {
+		// No attributes allowed.
+		s.droppedAttributes += len(attributes)
+		return
+	}
+
+	// If adding these attributes could exceed the capacity of s perform a
+	// de-duplication and truncation while adding to avoid over allocation.
+	if limit > 0 && len(s.attributes)+len(attributes) > limit {
+		s.addOverCapAttrs(limit, attributes)
+		return
+	}
+
+	// Otherwise, add without deduplication. When attributes are read they
+	// will be deduplicated, optimizing the operation.
+	s.ensureAttributesCapacity(len(s.attributes) + len(attributes))
+	for _, a := range attributes {
+		if !a.Valid() {
+			// Drop all invalid attributes.
+			s.droppedAttributes++
+			continue
+		}
+		a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
+		s.attributes = append(s.attributes, a)
+	}
+}
+
+// addOverCapAttrs adds the attributes attrs to the span s while
+// de-duplicating the attributes of s and attrs and dropping attributes that
+// exceed the limit.
+//
+// This method assumes s.mu.Lock is held by the caller.
+//
+// This method should only be called when there is a possibility that adding
+// attrs to s will exceed the limit. Otherwise, attrs should be added to s
+// without checking for duplicates and all retrieval methods of the attributes
+// for s will de-duplicate as needed.
+//
+// This method assumes limit is a value > 0. The argument should be validated
+// by the caller.
+func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
+	// In order to not allocate more capacity to s.attributes than needed,
+	// prune and truncate this addition of attributes while adding.
+
+	// Do not set a capacity when creating this map. Benchmark testing has
+	// shown this to only add unused memory allocations in general use.
+	exists := make(map[attribute.Key]int)
+	s.dedupeAttrsFromRecord(&exists)
+
+	// Now that s.attributes is deduplicated, adding unique attributes up to
+	// the capacity of s will not over allocate s.attributes.
+	if sum := len(attrs) + len(s.attributes); sum < limit {
+		// After support for go1.20 is dropped, simplify if-else to min(sum, limit).
+		s.ensureAttributesCapacity(sum)
+	} else {
+		s.ensureAttributesCapacity(limit)
+	}
+	for _, a := range attrs {
+		if !a.Valid() {
+			// Drop all invalid attributes.
+			s.droppedAttributes++
+			continue
+		}
+
+		if idx, ok := exists[a.Key]; ok {
+			// Perform all updates before dropping, even when at capacity.
+			s.attributes[idx] = a
+			continue
+		}
+
+		if len(s.attributes) >= limit {
+			// Do not just drop all of the remaining attributes, make sure
+			// updates are checked and performed.
+			s.droppedAttributes++
+		} else {
+			a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
+			s.attributes = append(s.attributes, a)
+			exists[a.Key] = len(s.attributes) - 1
+		}
+	}
+}
+
+// truncateAttr returns a truncated version of attr. Only string and string
+// slice attribute values are truncated. String values are truncated to at
+// most a length of limit. Each string slice value is truncated in this fashion
+// (the slice length itself is unaffected).
+//
+// No truncation is performed for a negative limit.
+func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue {
+	if limit < 0 {
+		return attr
+	}
+	switch attr.Value.Type() {
+	case attribute.STRING:
+		if v := attr.Value.AsString(); len(v) > limit {
+			return attr.Key.String(safeTruncate(v, limit))
+		}
+	case attribute.STRINGSLICE:
+		v := attr.Value.AsStringSlice()
+		for i := range v {
+			if len(v[i]) > limit {
+				v[i] = safeTruncate(v[i], limit)
+			}
+		}
+		return attr.Key.StringSlice(v)
+	}
+	return attr
+}
+
+// safeTruncate truncates the string and guarantees valid UTF-8 is returned.
+func safeTruncate(input string, limit int) string {
+	if trunc, ok := safeTruncateValidUTF8(input, limit); ok {
+		return trunc
+	}
+	trunc, _ := safeTruncateValidUTF8(strings.ToValidUTF8(input, ""), limit)
+	return trunc
+}
+
+// safeTruncateValidUTF8 returns a copy of the input string safely truncated to
+// limit. The truncation is guaranteed to occur at the boundaries of complete
+// UTF-8 characters. If an invalid UTF-8 encoding is encountered, input is
+// returned with false; otherwise, the truncated input is returned with true.
+func safeTruncateValidUTF8(input string, limit int) (string, bool) {
+	for cnt := 0; cnt <= limit; {
+		r, size := utf8.DecodeRuneInString(input[cnt:])
+		if r == utf8.RuneError {
+			return input, false
+		}
+
+		if cnt+size > limit {
+			return input[:cnt], true
+		}
+		cnt += size
+	}
+	return input, true
+}
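+
+// For illustration: a limit that would split a multi-byte rune is rounded
+// down to the previous rune boundary:
+//
+//	safeTruncate("hello", 3) // "hel"
+//	safeTruncate("héllo", 2) // "h" ('é' is 2 bytes and would straddle the limit)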
+
+// End ends the span. This method does nothing if the span is already ended or
+// is not being recorded.
+//
+// The only SpanOption currently supported is WithTimestamp which will set the
+// end time for a Span's life-cycle.
+//
+// If this method is called while panicking an error event is added to the
+// Span before ending it and the panic is continued.
+func (s *recordingSpan) End(options ...trace.SpanEndOption) {
+	// Do not start by checking if the span is being recorded which requires
+	// acquiring a lock. Make a minimal check that the span is not nil.
+	if s == nil {
+		return
+	}
+
+	// Store the end time as soon as possible to avoid artificially increasing
+	// the span's duration in case some operation below takes a while.
+	et := internal.MonotonicEndTime(s.startTime)
+
+	// Do a relatively expensive check now that we have an end time to see if
+	// we need to do any more processing.
+	if !s.IsRecording() {
+		return
+	}
+
+	config := trace.NewSpanEndConfig(options...)
+	if recovered := recover(); recovered != nil {
+		// Record but don't stop the panic.
+		defer panic(recovered)
+		opts := []trace.EventOption{
+			trace.WithAttributes(
+				semconv.ExceptionType(typeStr(recovered)),
+				semconv.ExceptionMessage(fmt.Sprint(recovered)),
+			),
+		}
+
+		if config.StackTrace() {
+			opts = append(opts, trace.WithAttributes(
+				semconv.ExceptionStacktrace(recordStackTrace()),
+			))
+		}
+
+		s.addEvent(semconv.ExceptionEventName, opts...)
+	}
+
+	if s.executionTracerTaskEnd != nil {
+		s.executionTracerTaskEnd()
+	}
+
+	s.mu.Lock()
+	// Setting endTime to non-zero marks the span as ended and not recording.
+	if config.Timestamp().IsZero() {
+		s.endTime = et
+	} else {
+		s.endTime = config.Timestamp()
+	}
+	s.mu.Unlock()
+
+	sps := s.tracer.provider.getSpanProcessors()
+	if len(sps) == 0 {
+		return
+	}
+	snap := s.snapshot()
+	for _, sp := range sps {
+		sp.sp.OnEnd(snap)
+	}
+}
+
+// RecordError will record err as a span event for this span. An additional call to
+// SetStatus is required if the Status of the Span should be set to Error; this method
+// does not change the Span status. If this span is not being recorded or err is nil,
+// then this method does nothing.
+func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) {
+	if s == nil || err == nil || !s.IsRecording() {
+		return
+	}
+
+	opts = append(opts, trace.WithAttributes(
+		semconv.ExceptionType(typeStr(err)),
+		semconv.ExceptionMessage(err.Error()),
+	))
+
+	c := trace.NewEventConfig(opts...)
+	if c.StackTrace() {
+		opts = append(opts, trace.WithAttributes(
+			semconv.ExceptionStacktrace(recordStackTrace()),
+		))
+	}
+
+	s.addEvent(semconv.ExceptionEventName, opts...)
+}
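+
+// An illustrative sketch pairing RecordError with SetStatus, since
+// RecordError alone leaves the status untouched (`doWork` is an assumed
+// caller-side function):
+//
+//	if err := doWork(ctx); err != nil {
+//		span.RecordError(err, trace.WithStackTrace(true))
+//		span.SetStatus(codes.Error, err.Error())
+//	}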
+
+func typeStr(i interface{}) string {
+	t := reflect.TypeOf(i)
+	if t.PkgPath() == "" && t.Name() == "" {
+		// Likely a builtin type.
+		return t.String()
+	}
+	return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+}
+
+func recordStackTrace() string {
+	stackTrace := make([]byte, 2048)
+	n := runtime.Stack(stackTrace, false)
+
+	return string(stackTrace[0:n])
+}
+
+// AddEvent adds an event with the provided name and options. If this span is
+// not being recorded, then this method does nothing.
+func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) {
+	if !s.IsRecording() {
+		return
+	}
+	s.addEvent(name, o...)
+}
+
+func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) {
+	c := trace.NewEventConfig(o...)
+	e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()}
+
+	// Discard attributes over limit.
+	limit := s.tracer.provider.spanLimits.AttributePerEventCountLimit
+	if limit == 0 {
+		// Drop all attributes.
+		e.DroppedAttributeCount = len(e.Attributes)
+		e.Attributes = nil
+	} else if limit > 0 && len(e.Attributes) > limit {
+		// Drop over capacity.
+		e.DroppedAttributeCount = len(e.Attributes) - limit
+		e.Attributes = e.Attributes[:limit]
+	}
+
+	s.mu.Lock()
+	s.events.add(e)
+	s.mu.Unlock()
+}
+
+// SetName sets the name of this span. If this span is not being recorded,
+// then this method does nothing.
+func (s *recordingSpan) SetName(name string) {
+	if !s.IsRecording() {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.name = name
+}
+
+// Name returns the name of this span.
+func (s *recordingSpan) Name() string {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.name
+}
+
+// Parent returns the SpanContext of this span's parent span.
+func (s *recordingSpan) Parent() trace.SpanContext {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.parent
+}
+
+// SpanKind returns the SpanKind of this span.
+func (s *recordingSpan) SpanKind() trace.SpanKind {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.spanKind
+}
+
+// StartTime returns the time this span started.
+func (s *recordingSpan) StartTime() time.Time {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.startTime
+}
+
+// EndTime returns the time this span ended. For spans that have not yet
+// ended, the returned value will be the zero value of time.Time.
+func (s *recordingSpan) EndTime() time.Time {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.endTime
+}
+
+// Attributes returns the attributes of this span.
+//
+// The order of the returned attributes is not guaranteed to be stable.
+func (s *recordingSpan) Attributes() []attribute.KeyValue {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.dedupeAttrs()
+	return s.attributes
+}
+
+// dedupeAttrs deduplicates the attributes of s to fit capacity.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) dedupeAttrs() {
+	// Do not set a capacity when creating this map. Benchmark testing has
+	// shown this to only add unused memory allocations in general use.
+	exists := make(map[attribute.Key]int)
+	s.dedupeAttrsFromRecord(&exists)
+}
+
+// dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity
+// using record as the record of unique attribute keys to their index.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) dedupeAttrsFromRecord(record *map[attribute.Key]int) {
+	// Use the fact that slices share the same backing array.
+	unique := s.attributes[:0]
+	for _, a := range s.attributes {
+		if idx, ok := (*record)[a.Key]; ok {
+			unique[idx] = a
+		} else {
+			unique = append(unique, a)
+			(*record)[a.Key] = len(unique) - 1
+		}
+	}
+	// s.attributes have element types of attribute.KeyValue. These types are
+	// not pointers and they themselves do not contain pointer fields,
+	// therefore the duplicate values do not need to be zeroed for them to be
+	// garbage collected.
+	s.attributes = unique
+}
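+
+// An illustrative sketch of the zero-allocation idiom used above: `unique`
+// shares attrs' backing array, so deduplication needs no second buffer
+// (`seen` is an assumed map[attribute.Key]struct{}):
+//
+//	unique := attrs[:0] // length 0, same backing array as attrs
+//	for _, a := range attrs {
+//		if _, ok := seen[a.Key]; !ok {
+//			unique = append(unique, a) // overwrites only already-visited slots
+//			seen[a.Key] = struct{}{}
+//		}
+//	}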
+
+// Links returns the links of this span.
+func (s *recordingSpan) Links() []Link {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if len(s.links.queue) == 0 {
+		return []Link{}
+	}
+	return s.interfaceArrayToLinksArray()
+}
+
+// Events returns the events of this span.
+func (s *recordingSpan) Events() []Event {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if len(s.events.queue) == 0 {
+		return []Event{}
+	}
+	return s.interfaceArrayToEventArray()
+}
+
+// Status returns the status of this span.
+func (s *recordingSpan) Status() Status {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.status
+}
+
+// InstrumentationScope returns the instrumentation.Scope associated with
+// the Tracer that created this span.
+func (s *recordingSpan) InstrumentationScope() instrumentation.Scope {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.tracer.instrumentationScope
+}
+
+// InstrumentationLibrary returns the instrumentation.Library associated with
+// the Tracer that created this span.
+func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.tracer.instrumentationScope
+}
+
+// Resource returns the Resource associated with the Tracer that created this
+// span.
+func (s *recordingSpan) Resource() *resource.Resource {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.tracer.provider.resource
+}
+
+func (s *recordingSpan) addLink(link trace.Link) {
+	if !s.IsRecording() || !link.SpanContext.IsValid() {
+		return
+	}
+
+	l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes}
+
+	// Discard attributes over limit.
+	limit := s.tracer.provider.spanLimits.AttributePerLinkCountLimit
+	if limit == 0 {
+		// Drop all attributes.
+		l.DroppedAttributeCount = len(l.Attributes)
+		l.Attributes = nil
+	} else if limit > 0 && len(l.Attributes) > limit {
+		l.DroppedAttributeCount = len(l.Attributes) - limit
+		l.Attributes = l.Attributes[:limit]
+	}
+
+	s.mu.Lock()
+	s.links.add(l)
+	s.mu.Unlock()
+}
+
+// DroppedAttributes returns the number of attributes dropped by the span
+// due to limits being reached.
+func (s *recordingSpan) DroppedAttributes() int {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.droppedAttributes
+}
+
+// DroppedLinks returns the number of links dropped by the span due to limits
+// being reached.
+func (s *recordingSpan) DroppedLinks() int {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.links.droppedCount
+}
+
+// DroppedEvents returns the number of events dropped by the span due to
+// limits being reached.
+func (s *recordingSpan) DroppedEvents() int {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.events.droppedCount
+}
+
+// ChildSpanCount returns the count of spans that consider the span a
+// direct parent.
+func (s *recordingSpan) ChildSpanCount() int {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.childSpanCount
+}
+
+// TracerProvider returns a trace.TracerProvider that can be used to generate
+// additional Spans on the same telemetry pipeline as the current Span.
+func (s *recordingSpan) TracerProvider() trace.TracerProvider {
+	return s.tracer.provider
+}
+
+// snapshot creates a read-only copy of the current state of the span.
+func (s *recordingSpan) snapshot() ReadOnlySpan {
+	var sd snapshot
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	sd.endTime = s.endTime
+	sd.instrumentationScope = s.tracer.instrumentationScope
+	sd.name = s.name
+	sd.parent = s.parent
+	sd.resource = s.tracer.provider.resource
+	sd.spanContext = s.spanContext
+	sd.spanKind = s.spanKind
+	sd.startTime = s.startTime
+	sd.status = s.status
+	sd.childSpanCount = s.childSpanCount
+
+	if len(s.attributes) > 0 {
+		s.dedupeAttrs()
+		sd.attributes = s.attributes
+	}
+	sd.droppedAttributeCount = s.droppedAttributes
+	if len(s.events.queue) > 0 {
+		sd.events = s.interfaceArrayToEventArray()
+		sd.droppedEventCount = s.events.droppedCount
+	}
+	if len(s.links.queue) > 0 {
+		sd.links = s.interfaceArrayToLinksArray()
+		sd.droppedLinkCount = s.links.droppedCount
+	}
+	return &sd
+}
+
+func (s *recordingSpan) interfaceArrayToLinksArray() []Link {
+	linkArr := make([]Link, 0)
+	for _, value := range s.links.queue {
+		linkArr = append(linkArr, value.(Link))
+	}
+	return linkArr
+}
+
+func (s *recordingSpan) interfaceArrayToEventArray() []Event {
+	eventArr := make([]Event, 0)
+	for _, value := range s.events.queue {
+		eventArr = append(eventArr, value.(Event))
+	}
+	return eventArr
+}
+
+func (s *recordingSpan) addChild() {
+	if !s.IsRecording() {
+		return
+	}
+	s.mu.Lock()
+	s.childSpanCount++
+	s.mu.Unlock()
+}
+
+func (*recordingSpan) private() {}
+
+// runtimeTrace starts a "runtime/trace".Task for the span and returns a
+// context containing the task.
+func (s *recordingSpan) runtimeTrace(ctx context.Context) context.Context {
+	if !rt.IsEnabled() {
+		// Avoid additional overhead if runtime/trace is not enabled.
+		return ctx
+	}
+	nctx, task := rt.NewTask(ctx, s.name)
+
+	s.mu.Lock()
+	s.executionTracerTaskEnd = task.End
+	s.mu.Unlock()
+
+	return nctx
+}
+
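
For context: the runtime/trace Task created above is only recorded when standard-library tracing is enabled, so spans are invisible to `go tool trace` by default. A minimal sketch of enabling it (the output file name and tracer name are illustrative, not part of this change):

package main

import (
	"context"
	"os"
	rt "runtime/trace"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Enable runtime/trace so SDK spans also appear as tasks in
	// `go tool trace` output.
	f, err := os.Create("trace.out")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := rt.Start(f); err != nil {
		panic(err)
	}
	defer rt.Stop()

	tp := sdktrace.NewTracerProvider()
	defer tp.Shutdown(context.Background())

	_, span := tp.Tracer("example").Start(context.Background(), "work")
	span.End()
}
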
+// nonRecordingSpan is a minimal implementation of the OpenTelemetry Span API
+// that wraps a SpanContext. It performs no operations other than to return
+// the wrapped SpanContext or TracerProvider that created it.
+type nonRecordingSpan struct {
+	embedded.Span
+
+	// tracer is the SDK tracer that created this span.
+	tracer *tracer
+	sc     trace.SpanContext
+}
+
+var _ trace.Span = nonRecordingSpan{}
+
+// SpanContext returns the wrapped SpanContext.
+func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc }
+
+// IsRecording always returns false.
+func (nonRecordingSpan) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (nonRecordingSpan) SetStatus(codes.Code, string) {}
+
+// SetError does nothing.
+func (nonRecordingSpan) SetError(bool) {}
+
+// SetAttributes does nothing.
+func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (nonRecordingSpan) End(...trace.SpanEndOption) {}
+
+// RecordError does nothing.
+func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {}
+
+// AddEvent does nothing.
+func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {}
+
+// SetName does nothing.
+func (nonRecordingSpan) SetName(string) {}
+
+// TracerProvider returns the trace.TracerProvider that provided the Tracer
+// that created this span.
+func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider }
+
+func isRecording(s SamplingResult) bool {
+	return s.Decision == RecordOnly || s.Decision == RecordAndSample
+}
+
+func isSampled(s SamplingResult) bool {
+	return s.Decision == RecordAndSample
+}
+
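
A practical consequence of the recording/sampling split above is that API callers may receive a non-recording span, so expensive attribute computation is usually guarded. A minimal sketch (the attribute key and digest function are illustrative):

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

// annotate only pays for the costly attribute when the span records it.
func annotate(ctx context.Context, expensiveDigest func() string) {
	span := trace.SpanFromContext(ctx)
	if !span.IsRecording() {
		return // a nonRecordingSpan discards all attributes anyway
	}
	span.SetAttributes(attribute.String("example.digest", expensiveDigest()))
}
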
+// Status is the classified state of a Span.
+type Status struct {
+	// Code is an identifier of a Span's state classification.
+	Code codes.Code
+	// Description is a user hint about why that status was set. It is only
+	// applicable when Code is Error.
+	Description string
+}
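
Since Description is only applicable when Code is Error, a typical end-of-operation pattern looks like the following sketch (the helper name is illustrative):

import (
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

// finish records the error, sets a classified Status, and ends the span.
func finish(span trace.Span, err error) {
	if err != nil {
		span.RecordError(err)
		span.SetStatus(codes.Error, err.Error()) // Description is used only with Error
	} else {
		span.SetStatus(codes.Ok, "") // Description is ignored for Ok
	}
	span.End()
}
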
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
new file mode 100644
index 0000000000000000000000000000000000000000..c9bd52f7ad4277689eaeca61699f0a8589c1844c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
@@ -0,0 +1,47 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import "context"
+
+// SpanExporter handles the delivery of spans to external receivers. This is
+// the final component in the trace export pipeline.
+type SpanExporter interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// ExportSpans exports a batch of spans.
+	//
+	// This function is called synchronously, so there is no concurrency
+	// safety requirement. However, due to the synchronous calling pattern,
+	// it is critical that all timeouts and cancellations contained in the
+	// passed context be honored.
+	//
+	// Any retry logic must be contained in this function. The SDK that
+	// calls this function will not implement any retry logic. All errors
+	// returned by this function are considered unrecoverable and will be
+	// reported to a configured error Handler.
+	ExportSpans(ctx context.Context, spans []ReadOnlySpan) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Shutdown notifies the exporter of a pending halt to operations. The
+	// exporter is expected to perform any cleanup or synchronization it
+	// requires while honoring all timeouts and cancellations contained in
+	// the passed context.
+	Shutdown(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
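
To make the contract above concrete, here is a minimal exporter sketch: it honors context cancellation, does its own (here: no) retrying, and returns errors it considers unrecoverable. The type and output format are illustrative, not part of this change:

import (
	"context"
	"fmt"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// printExporter writes one line per span and respects ctx cancellation.
type printExporter struct{}

func (printExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
	for _, s := range spans {
		select {
		case <-ctx.Done():
			return ctx.Err() // honor timeouts/cancellation as required
		default:
		}
		fmt.Printf("span %q trace=%s\n", s.Name(), s.SpanContext().TraceID())
	}
	return nil
}

func (printExporter) Shutdown(ctx context.Context) error { return nil }

Such an exporter would typically be registered on a TracerProvider via sdktrace.WithBatcher(printExporter{}).
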
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
new file mode 100644
index 0000000000000000000000000000000000000000..aa4d4221db36d0f28ae493b81265803fa89750a5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
@@ -0,0 +1,125 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import "go.opentelemetry.io/otel/sdk/internal/env"
+
+const (
+	// DefaultAttributeValueLengthLimit is the default maximum allowed
+	// attribute value length, unlimited.
+	DefaultAttributeValueLengthLimit = -1
+
+	// DefaultAttributeCountLimit is the default maximum number of attributes
+	// a span can have.
+	DefaultAttributeCountLimit = 128
+
+	// DefaultEventCountLimit is the default maximum number of events a span
+	// can have.
+	DefaultEventCountLimit = 128
+
+	// DefaultLinkCountLimit is the default maximum number of links a span can
+	// have.
+	DefaultLinkCountLimit = 128
+
+	// DefaultAttributePerEventCountLimit is the default maximum number of
+	// attributes a span event can have.
+	DefaultAttributePerEventCountLimit = 128
+
+	// DefaultAttributePerLinkCountLimit is the default maximum number of
+	// attributes a span link can have.
+	DefaultAttributePerLinkCountLimit = 128
+)
+
+// SpanLimits represents the limits of a span.
+type SpanLimits struct {
+	// AttributeValueLengthLimit is the maximum allowed attribute value length.
+	//
+	// This limit only applies to string and string slice attribute values.
+	// Any string longer than this value will be truncated to this length.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributeValueLengthLimit int
+
+	// AttributeCountLimit is the maximum allowed span attribute count. Any
+	// attribute added to a span once this limit is reached will be dropped.
+	//
+	// Setting this to zero means no attributes will be recorded.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributeCountLimit int
+
+	// EventCountLimit is the maximum allowed span event count. Any event
+	// added to a span once this limit is reached will still be added, but
+	// the oldest event will be dropped.
+	//
+	// Setting this to zero means no events will be recorded.
+	//
+	// Setting this to a negative value means no limit is applied.
+	EventCountLimit int
+
+	// LinkCountLimit is the maximum allowed span link count. Any link added
+	// to a span once this limit is reached will still be added, but the
+	// oldest link will be dropped.
+	//
+	// Setting this to zero means no links will be recorded.
+	//
+	// Setting this to a negative value means no limit is applied.
+	LinkCountLimit int
+
+	// AttributePerEventCountLimit is the maximum number of attributes allowed
+	// per span event. Any attribute added after this limit is reached will be
+	// dropped.
+	//
+	// Setting this to zero means no attributes will be recorded for events.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributePerEventCountLimit int
+
+	// AttributePerLinkCountLimit is the maximum number of attributes allowed
+	// per span link. Any attribute added after this limit is reached will be
+	// dropped.
+	//
+	// Setting this to zero means no attributes will be recorded for links.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributePerLinkCountLimit int
+}
+
+// NewSpanLimits returns a SpanLimits with all limits set to the value their
+// corresponding environment variable holds, or the default if unset.
+//
+// • AttributeValueLengthLimit: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT
+// (default: unlimited)
+//
+// • AttributeCountLimit: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT (default: 128)
+//
+// • EventCountLimit: OTEL_SPAN_EVENT_COUNT_LIMIT (default: 128)
+//
+// • AttributePerEventCountLimit: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT (default:
+// 128)
+//
+// • LinkCountLimit: OTEL_SPAN_LINK_COUNT_LIMIT (default: 128)
+//
+// • AttributePerLinkCountLimit: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT (default: 128)
+func NewSpanLimits() SpanLimits {
+	return SpanLimits{
+		AttributeValueLengthLimit:   env.SpanAttributeValueLength(DefaultAttributeValueLengthLimit),
+		AttributeCountLimit:         env.SpanAttributeCount(DefaultAttributeCountLimit),
+		EventCountLimit:             env.SpanEventCount(DefaultEventCountLimit),
+		LinkCountLimit:              env.SpanLinkCount(DefaultLinkCountLimit),
+		AttributePerEventCountLimit: env.SpanEventAttributeCount(DefaultAttributePerEventCountLimit),
+		AttributePerLinkCountLimit:  env.SpanLinkAttributeCount(DefaultAttributePerLinkCountLimit),
+	}
+}
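
A sketch of adjusting these limits programmatically, assuming this SDK's WithRawSpanLimits provider option (which applies the limits verbatim, including zero and negative values); the chosen values are illustrative:

import sdktrace "go.opentelemetry.io/otel/sdk/trace"

func newProvider() *sdktrace.TracerProvider {
	limits := sdktrace.NewSpanLimits() // env overrides or defaults
	limits.AttributeCountLimit = 64    // tighten per-span attributes
	limits.AttributeValueLengthLimit = 1024

	return sdktrace.NewTracerProvider(sdktrace.WithRawSpanLimits(limits))
}

The environment variables listed above achieve the same effect without code changes.
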
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
new file mode 100644
index 0000000000000000000000000000000000000000..9c53657a7190f7d9da0f4ddfd25a26847d8a8b81
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
@@ -0,0 +1,72 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"sync"
+)
+
+// SpanProcessor is a processing pipeline for spans in the trace signal.
+// SpanProcessors are registered with a TracerProvider and are called at the
+// start and end of a Span's lifecycle, in the order they are registered.
+type SpanProcessor interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// OnStart is called when a span is started. It is called synchronously
+	// and should not block.
+	OnStart(parent context.Context, s ReadWriteSpan)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// OnEnd is called when a span is finished. It is called synchronously
+	// and hence should not block.
+	OnEnd(s ReadOnlySpan)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Shutdown is called when the SDK shuts down. Any cleanup or release of
+	// resources held by the processor should be done in this call.
+	//
+	// Calls to OnStart, OnEnd, or ForceFlush after this has been called
+	// should be ignored.
+	//
+	// All timeouts and cancellations contained in ctx must be honored;
+	// this should not block indefinitely.
+	Shutdown(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// ForceFlush exports all ended spans that have not yet been exported to
+	// the configured Exporter. It should only be called when absolutely
+	// necessary, such as when using a FaaS provider that may suspend the
+	// process after an invocation, but before the Processor can export the
+	// completed spans.
+	ForceFlush(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+type spanProcessorState struct {
+	sp    SpanProcessor
+	state sync.Once
+}
+
+func newSpanProcessorState(sp SpanProcessor) *spanProcessorState {
+	return &spanProcessorState{sp: sp}
+}
+
+type spanProcessorStates []*spanProcessorState
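
For illustration, a minimal SpanProcessor satisfying the interface above: it keeps OnStart/OnEnd non-blocking and treats Shutdown/ForceFlush as no-ops. The type name is illustrative:

import (
	"context"
	"sync/atomic"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// countingProcessor counts ended spans without blocking the hot path.
type countingProcessor struct {
	ended atomic.Int64
}

func (*countingProcessor) OnStart(parent context.Context, s sdktrace.ReadWriteSpan) {}

func (p *countingProcessor) OnEnd(s sdktrace.ReadOnlySpan) { p.ended.Add(1) }

func (*countingProcessor) Shutdown(ctx context.Context) error   { return nil }
func (*countingProcessor) ForceFlush(ctx context.Context) error { return nil }

It would be registered with sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(&countingProcessor{})).
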
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
new file mode 100644
index 0000000000000000000000000000000000000000..301e1a7abccf55f80c8ce71f75c7a6524cceff82
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
@@ -0,0 +1,164 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"time"
+
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+type tracer struct {
+	embedded.Tracer
+
+	provider             *TracerProvider
+	instrumentationScope instrumentation.Scope
+}
+
+var _ trace.Tracer = &tracer{}
+
+// Start starts a Span and returns it along with a context containing it.
+//
+// The Span is created with the provided name and as a child of any existing
+// span context found in the passed context. The created Span will be
+// configured appropriately by any SpanOption passed.
+func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) {
+	config := trace.NewSpanStartConfig(options...)
+
+	if ctx == nil {
+		// Prevent trace.ContextWithSpan from panicking.
+		ctx = context.Background()
+	}
+
+	// For local spans created by this SDK, track child span count.
+	if p := trace.SpanFromContext(ctx); p != nil {
+		if sdkSpan, ok := p.(*recordingSpan); ok {
+			sdkSpan.addChild()
+		}
+	}
+
+	s := tr.newSpan(ctx, name, &config)
+	if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() {
+		sps := tr.provider.getSpanProcessors()
+		for _, sp := range sps {
+			sp.sp.OnStart(ctx, rw)
+		}
+	}
+	if rtt, ok := s.(runtimeTracer); ok {
+		ctx = rtt.runtimeTrace(ctx)
+	}
+
+	return trace.ContextWithSpan(ctx, s), s
+}
+
+type runtimeTracer interface {
+	// runtimeTrace starts a "runtime/trace".Task for the span and
+	// returns a context containing the task.
+	runtimeTrace(ctx context.Context) context.Context
+}
+
+// newSpan returns a new configured span.
+func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanConfig) trace.Span {
+	// If told explicitly to make this a new root, use a zero-value
+	// SpanContext as the parent, which contains an invalid trace ID and is
+	// not remote.
+	var psc trace.SpanContext
+	if config.NewRoot() {
+		ctx = trace.ContextWithSpanContext(ctx, psc)
+	} else {
+		psc = trace.SpanContextFromContext(ctx)
+	}
+
+	// If there is a valid parent trace ID, use it to ensure the continuity of
+	// the trace. Always generate a new span ID so other components can rely
+	// on a unique span ID, even if the Span is non-recording.
+	var tid trace.TraceID
+	var sid trace.SpanID
+	if !psc.TraceID().IsValid() {
+		tid, sid = tr.provider.idGenerator.NewIDs(ctx)
+	} else {
+		tid = psc.TraceID()
+		sid = tr.provider.idGenerator.NewSpanID(ctx, tid)
+	}
+
+	samplingResult := tr.provider.sampler.ShouldSample(SamplingParameters{
+		ParentContext: ctx,
+		TraceID:       tid,
+		Name:          name,
+		Kind:          config.SpanKind(),
+		Attributes:    config.Attributes(),
+		Links:         config.Links(),
+	})
+
+	scc := trace.SpanContextConfig{
+		TraceID:    tid,
+		SpanID:     sid,
+		TraceState: samplingResult.Tracestate,
+	}
+	if isSampled(samplingResult) {
+		scc.TraceFlags = psc.TraceFlags() | trace.FlagsSampled
+	} else {
+		scc.TraceFlags = psc.TraceFlags() &^ trace.FlagsSampled
+	}
+	sc := trace.NewSpanContext(scc)
+
+	if !isRecording(samplingResult) {
+		return tr.newNonRecordingSpan(sc)
+	}
+	return tr.newRecordingSpan(psc, sc, name, samplingResult, config)
+}
+
+// newRecordingSpan returns a new configured recordingSpan.
+func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr SamplingResult, config *trace.SpanConfig) *recordingSpan {
+	startTime := config.Timestamp()
+	if startTime.IsZero() {
+		startTime = time.Now()
+	}
+
+	s := &recordingSpan{
+		// Do not pre-allocate the attributes slice here! Doing so will
+		// allocate memory that is likely never going to be used, or if used,
+		// will be over-sized. The default Go compiler has been tested to
+		// dynamically allocate needed space very well. Benchmarking has shown
+		// it to be more performant than what we can predetermine here,
+		// especially for the common use case of few to no added
+		// attributes.
+
+		parent:      psc,
+		spanContext: sc,
+		spanKind:    trace.ValidateSpanKind(config.SpanKind()),
+		name:        name,
+		startTime:   startTime,
+		events:      newEvictedQueue(tr.provider.spanLimits.EventCountLimit),
+		links:       newEvictedQueue(tr.provider.spanLimits.LinkCountLimit),
+		tracer:      tr,
+	}
+
+	for _, l := range config.Links() {
+		s.addLink(l)
+	}
+
+	s.SetAttributes(sr.Attributes...)
+	s.SetAttributes(config.Attributes()...)
+
+	return s
+}
+
+// newNonRecordingSpan returns a new configured nonRecordingSpan.
+func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan {
+	return nonRecordingSpan{tracer: tr, sc: sc}
+}
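
The sampling and parent-context handling in newSpan is easiest to see from the caller's side. A sketch, with the sampler choice and names illustrative:

import (
	"context"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func example(ctx context.Context) {
	// Sample 25% of new traces at the root; children inherit their
	// parent's decision, which is what the TraceFlags logic above encodes.
	tp := sdktrace.NewTracerProvider(
		sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.25))),
	)
	defer tp.Shutdown(context.Background())

	tr := tp.Tracer("example")
	ctx, parent := tr.Start(ctx, "parent")
	_, child := tr.Start(ctx, "child") // same trace ID, new span ID
	child.End()
	parent.End()
}
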
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
new file mode 100644
index 0000000000000000000000000000000000000000..d3457ed1355aa27f6ef514b2573a50c9fd4e39dc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+// version is the current release version of the trace SDK in use.
+func version() string {
+	return "1.16.0-rc.1"
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go
new file mode 100644
index 0000000000000000000000000000000000000000..b6910a0113ddb2f1d48ebd92d5b4c8571cde1c19
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/version.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sdk // import "go.opentelemetry.io/otel/sdk"
+
+// Version is the current release version of the OpenTelemetry SDK in use.
+func Version() string {
+	return "1.23.1"
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go
new file mode 100644
index 0000000000000000000000000000000000000000..e6cf895105363c65e86347f906a93e1953846edb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go
@@ -0,0 +1,1877 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// These attributes may be used to describe the client in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API does not expose a
+// clear notion of client and server). This also covers UDP network
+// interactions where one side initiates the interaction, e.g. QUIC (HTTP/3)
+// and DNS.
+const (
+	// ClientAddressKey is the attribute Key conforming to the "client.address"
+	// semantic conventions. It represents the client address - unix domain
+	// socket name, IPv4 or IPv6 address.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/tmp/my.sock', '10.1.2.80'
+	// Note: When observed from the server side, and when communicating through
+	// an intermediary, `client.address` SHOULD represent client address behind
+	// any intermediaries (e.g. proxies) if it's available.
+	ClientAddressKey = attribute.Key("client.address")
+
+	// ClientPortKey is the attribute Key conforming to the "client.port"
+	// semantic conventions. It represents the client port number
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	// Note: When observed from the server side, and when communicating through
+	// an intermediary, `client.port` SHOULD represent client port behind any
+	// intermediaries (e.g. proxies) if it's available.
+	ClientPortKey = attribute.Key("client.port")
+
+	// ClientSocketAddressKey is the attribute Key conforming to the
+	// "client.socket.address" semantic conventions. It represents the
+	// immediate client peer address - unix domain socket name, IPv4 or IPv6
+	// address.
+	//
+	// Type: string
+	// RequirementLevel: Recommended (If different than `client.address`.)
+	// Stability: stable
+	// Examples: '/tmp/my.sock', '127.0.0.1'
+	ClientSocketAddressKey = attribute.Key("client.socket.address")
+
+	// ClientSocketPortKey is the attribute Key conforming to the
+	// "client.socket.port" semantic conventions. It represents the immediate
+	// client peer port number
+	//
+	// Type: int
+	// RequirementLevel: Recommended (If different than `client.port`.)
+	// Stability: stable
+	// Examples: 35555
+	ClientSocketPortKey = attribute.Key("client.socket.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the
+// "client.address" semantic conventions. It represents the client address -
+// unix domain socket name, IPv4 or IPv6 address.
+func ClientAddress(val string) attribute.KeyValue {
+	return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number
+func ClientPort(val int) attribute.KeyValue {
+	return ClientPortKey.Int(val)
+}
+
+// ClientSocketAddress returns an attribute KeyValue conforming to the
+// "client.socket.address" semantic conventions. It represents the immediate
+// client peer address - unix domain socket name, IPv4 or IPv6 address.
+func ClientSocketAddress(val string) attribute.KeyValue {
+	return ClientSocketAddressKey.String(val)
+}
+
+// ClientSocketPort returns an attribute KeyValue conforming to the
+// "client.socket.port" semantic conventions. It represents the immediate
+// client peer port number
+func ClientSocketPort(val int) attribute.KeyValue {
+	return ClientSocketPortKey.Int(val)
+}
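
A sketch of how these typed helpers are used in practice (the span is assumed to come from the caller):

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateServerSpan records the peer using the helpers above instead of
// hand-written attribute keys.
func annotateServerSpan(span trace.Span, addr string, port int) {
	span.SetAttributes(
		semconv.ClientAddress(addr), // e.g. "10.1.2.80" or "/tmp/my.sock"
		semconv.ClientPort(port),    // e.g. 65123
	)
}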
+
+// Describes deprecated HTTP attributes.
+const (
+	// HTTPMethodKey is the attribute Key conforming to the "http.method"
+	// semantic conventions. It is deprecated; use
+	// `http.request.method` instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'GET', 'POST', 'HEAD'
+	HTTPMethodKey = attribute.Key("http.method")
+
+	// HTTPStatusCodeKey is the attribute Key conforming to the
+	// "http.status_code" semantic conventions. It represents the deprecated,
+	// use `http.response.status_code` instead.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 200
+	HTTPStatusCodeKey = attribute.Key("http.status_code")
+
+	// HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+	// semantic conventions. It is deprecated; use `url.scheme`
+	// instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'http', 'https'
+	HTTPSchemeKey = attribute.Key("http.scheme")
+
+	// HTTPURLKey is the attribute Key conforming to the "http.url" semantic
+	// conventions. It is deprecated; use `url.full` instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+	HTTPURLKey = attribute.Key("http.url")
+
+	// HTTPTargetKey is the attribute Key conforming to the "http.target"
+	// semantic conventions. It is deprecated; use `url.path` and
+	// `url.query` instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '/search?q=OpenTelemetry#SemConv'
+	HTTPTargetKey = attribute.Key("http.target")
+
+	// HTTPRequestContentLengthKey is the attribute Key conforming to the
+	// "http.request_content_length" semantic conventions. It represents the
+	// deprecated, use `http.request.body.size` instead.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 3495
+	HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+	// HTTPResponseContentLengthKey is the attribute Key conforming to the
+	// "http.response_content_length" semantic conventions. It represents the
+	// deprecated, use `http.response.body.size` instead.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 3495
+	HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions. It is deprecated; use
+// `http.request.method` instead.
+func HTTPMethod(val string) attribute.KeyValue {
+	return HTTPMethodKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions. It represents the deprecated, use
+// `http.response.status_code` instead.
+func HTTPStatusCode(val int) attribute.KeyValue {
+	return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions. It is deprecated; use `url.scheme`
+// instead.
+func HTTPScheme(val string) attribute.KeyValue {
+	return HTTPSchemeKey.String(val)
+}
+
+// HTTPURL returns an attribute KeyValue conforming to the "http.url"
+// semantic conventions. It is deprecated; use `url.full` instead.
+func HTTPURL(val string) attribute.KeyValue {
+	return HTTPURLKey.String(val)
+}
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions. It is deprecated; use `url.path` and
+// `url.query` instead.
+func HTTPTarget(val string) attribute.KeyValue {
+	return HTTPTargetKey.String(val)
+}
+
+// HTTPRequestContentLength returns an attribute KeyValue conforming to the
+// "http.request_content_length" semantic conventions. It represents the
+// deprecated, use `http.request.body.size` instead.
+func HTTPRequestContentLength(val int) attribute.KeyValue {
+	return HTTPRequestContentLengthKey.Int(val)
+}
+
+// HTTPResponseContentLength returns an attribute KeyValue conforming to the
+// "http.response_content_length" semantic conventions. It represents the
+// deprecated, use `http.response.body.size` instead.
+func HTTPResponseContentLength(val int) attribute.KeyValue {
+	return HTTPResponseContentLengthKey.Int(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+	// NetSockPeerNameKey is the attribute Key conforming to the
+	// "net.sock.peer.name" semantic conventions. It represents the deprecated,
+	// use `server.socket.domain` on client spans.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '/var/my.sock'
+	NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
+
+	// NetSockPeerAddrKey is the attribute Key conforming to the
+	// "net.sock.peer.addr" semantic conventions. It represents the deprecated,
+	// use `server.socket.address` on client spans and `client.socket.address`
+	// on server spans.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '192.168.0.1'
+	NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
+
+	// NetSockPeerPortKey is the attribute Key conforming to the
+	// "net.sock.peer.port" semantic conventions. It represents the deprecated,
+	// use `server.socket.port` on client spans and `client.socket.port` on
+	// server spans.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 65531
+	NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
+
+	// NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
+	// semantic conventions. It is deprecated; use `server.address`
+	// on client spans and `client.address` on server spans.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'example.com'
+	NetPeerNameKey = attribute.Key("net.peer.name")
+
+	// NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
+	// semantic conventions. It is deprecated; use `server.port` on
+	// client spans and `client.port` on server spans.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 8080
+	NetPeerPortKey = attribute.Key("net.peer.port")
+
+	// NetHostNameKey is the attribute Key conforming to the "net.host.name"
+	// semantic conventions. It is deprecated; use `server.address`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'example.com'
+	NetHostNameKey = attribute.Key("net.host.name")
+
+	// NetHostPortKey is the attribute Key conforming to the "net.host.port"
+	// semantic conventions. It is deprecated; use `server.port`.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 8080
+	NetHostPortKey = attribute.Key("net.host.port")
+
+	// NetSockHostAddrKey is the attribute Key conforming to the
+	// "net.sock.host.addr" semantic conventions. It represents the deprecated,
+	// use `server.socket.address`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '/var/my.sock'
+	NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
+
+	// NetSockHostPortKey is the attribute Key conforming to the
+	// "net.sock.host.port" semantic conventions. It represents the deprecated,
+	// use `server.socket.port`.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 8080
+	NetSockHostPortKey = attribute.Key("net.sock.host.port")
+
+	// NetTransportKey is the attribute Key conforming to the "net.transport"
+	// semantic conventions. It is deprecated; use `network.transport`.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	NetTransportKey = attribute.Key("net.transport")
+
+	// NetProtocolNameKey is the attribute Key conforming to the
+	// "net.protocol.name" semantic conventions. It represents the deprecated,
+	// use `network.protocol.name`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'amqp', 'http', 'mqtt'
+	NetProtocolNameKey = attribute.Key("net.protocol.name")
+
+	// NetProtocolVersionKey is the attribute Key conforming to the
+	// "net.protocol.version" semantic conventions. It represents the
+	// deprecated, use `network.protocol.version`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '3.1.1'
+	NetProtocolVersionKey = attribute.Key("net.protocol.version")
+
+	// NetSockFamilyKey is the attribute Key conforming to the
+	// "net.sock.family" semantic conventions. It represents the deprecated,
+	// use `network.transport` and `network.type`.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	NetSockFamilyKey = attribute.Key("net.sock.family")
+)
+
+var (
+	// ip_tcp
+	NetTransportTCP = NetTransportKey.String("ip_tcp")
+	// ip_udp
+	NetTransportUDP = NetTransportKey.String("ip_udp")
+	// Named or anonymous pipe
+	NetTransportPipe = NetTransportKey.String("pipe")
+	// In-process communication
+	NetTransportInProc = NetTransportKey.String("inproc")
+	// Something else (non IP-based)
+	NetTransportOther = NetTransportKey.String("other")
+)
+
+var (
+	// IPv4 address
+	NetSockFamilyInet = NetSockFamilyKey.String("inet")
+	// IPv6 address
+	NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
+	// Unix domain socket path
+	NetSockFamilyUnix = NetSockFamilyKey.String("unix")
+)
+
+// NetSockPeerName returns an attribute KeyValue conforming to the
+// "net.sock.peer.name" semantic conventions. It represents the deprecated, use
+// `server.socket.domain` on client spans.
+func NetSockPeerName(val string) attribute.KeyValue {
+	return NetSockPeerNameKey.String(val)
+}
+
+// NetSockPeerAddr returns an attribute KeyValue conforming to the
+// "net.sock.peer.addr" semantic conventions. It represents the deprecated, use
+// `server.socket.address` on client spans and `client.socket.address` on
+// server spans.
+func NetSockPeerAddr(val string) attribute.KeyValue {
+	return NetSockPeerAddrKey.String(val)
+}
+
+// NetSockPeerPort returns an attribute KeyValue conforming to the
+// "net.sock.peer.port" semantic conventions. It represents the deprecated, use
+// `server.socket.port` on client spans and `client.socket.port` on server
+// spans.
+func NetSockPeerPort(val int) attribute.KeyValue {
+	return NetSockPeerPortKey.Int(val)
+}
+
+// NetPeerName returns an attribute KeyValue conforming to the
+// "net.peer.name" semantic conventions. It represents the deprecated, use
+// `server.address` on client spans and `client.address` on server spans.
+func NetPeerName(val string) attribute.KeyValue {
+	return NetPeerNameKey.String(val)
+}
+
+// NetPeerPort returns an attribute KeyValue conforming to the
+// "net.peer.port" semantic conventions. It represents the deprecated, use
+// `server.port` on client spans and `client.port` on server spans.
+func NetPeerPort(val int) attribute.KeyValue {
+	return NetPeerPortKey.Int(val)
+}
+
+// NetHostName returns an attribute KeyValue conforming to the
+// "net.host.name" semantic conventions. It represents the deprecated, use
+// `server.address`.
+func NetHostName(val string) attribute.KeyValue {
+	return NetHostNameKey.String(val)
+}
+
+// NetHostPort returns an attribute KeyValue conforming to the
+// "net.host.port" semantic conventions. It represents the deprecated, use
+// `server.port`.
+func NetHostPort(val int) attribute.KeyValue {
+	return NetHostPortKey.Int(val)
+}
+
+// NetSockHostAddr returns an attribute KeyValue conforming to the
+// "net.sock.host.addr" semantic conventions. It represents the deprecated, use
+// `server.socket.address`.
+func NetSockHostAddr(val string) attribute.KeyValue {
+	return NetSockHostAddrKey.String(val)
+}
+
+// NetSockHostPort returns an attribute KeyValue conforming to the
+// "net.sock.host.port" semantic conventions. It represents the deprecated, use
+// `server.socket.port`.
+func NetSockHostPort(val int) attribute.KeyValue {
+	return NetSockHostPortKey.Int(val)
+}
+
+// NetProtocolName returns an attribute KeyValue conforming to the
+// "net.protocol.name" semantic conventions. It represents the deprecated, use
+// `network.protocol.name`.
+func NetProtocolName(val string) attribute.KeyValue {
+	return NetProtocolNameKey.String(val)
+}
+
+// NetProtocolVersion returns an attribute KeyValue conforming to the
+// "net.protocol.version" semantic conventions. It represents the deprecated,
+// use `network.protocol.version`.
+func NetProtocolVersion(val string) attribute.KeyValue {
+	return NetProtocolVersionKey.String(val)
+}
+
+// These attributes may be used to describe the receiver of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API does not expose a clear notion
+// of client and server.
+const (
+	// DestinationDomainKey is the attribute Key conforming to the
+	// "destination.domain" semantic conventions. It represents the domain name
+	// of the destination system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'foo.example.com'
+	// Note: This value may be a host name, a fully qualified domain name, or
+	// another host naming format.
+	DestinationDomainKey = attribute.Key("destination.domain")
+
+	// DestinationAddressKey is the attribute Key conforming to the
+	// "destination.address" semantic conventions. It represents the peer
+	// address, for example IP address or UNIX socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.5.3.2'
+	DestinationAddressKey = attribute.Key("destination.address")
+
+	// DestinationPortKey is the attribute Key conforming to the
+	// "destination.port" semantic conventions. It represents the peer port
+	// number
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 3389, 2888
+	DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationDomain returns an attribute KeyValue conforming to the
+// "destination.domain" semantic conventions. It represents the domain name of
+// the destination system.
+func DestinationDomain(val string) attribute.KeyValue {
+	return DestinationDomainKey.String(val)
+}
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the peer address,
+// for example IP address or UNIX socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+	return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the peer port number
+func DestinationPort(val int) attribute.KeyValue {
+	return DestinationPortKey.Int(val)
+}
+
+// Describes HTTP attributes.
+const (
+	// HTTPRequestMethodKey is the attribute Key conforming to the
+	// "http.request.method" semantic conventions. It represents the hTTP
+	// request method.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'GET', 'POST', 'HEAD'
+	// Note: HTTP request method value SHOULD be "known" to the
+	// instrumentation.
+	// By default, this convention defines "known" methods as the ones listed
+	// in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+	// and the PATCH method defined in
+	// [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+	//
+	// If the HTTP request method is not known to instrumentation, it MUST set
+	// the `http.request.method` attribute to `_OTHER` and, except if reporting
+	// a metric, MUST
+	// set the exact method received in the request line as value of the
+	// `http.request.method_original` attribute.
+	//
+	// If the HTTP instrumentation could end up converting valid HTTP request
+	// methods to `_OTHER`, then it MUST provide a way to override
+	// the list of known HTTP methods. If this override is done via environment
+	// variable, then the environment variable MUST be named
+	// OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
+	// list of case-sensitive known HTTP methods
+	// (this list MUST be a full override of the default known methods; it is
+	// not a list of known methods in addition to the defaults).
+	//
+	// HTTP method names are case-sensitive and `http.request.method` attribute
+	// value MUST match a known HTTP method name exactly.
+	// Instrumentations for specific web frameworks that consider HTTP methods
+	// to be case-insensitive SHOULD populate a canonical equivalent.
+	// Tracing instrumentations that do so MUST also set
+	// `http.request.method_original` to the original value.
+	HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+	// HTTPResponseStatusCodeKey is the attribute Key conforming to the
+	// "http.response.status_code" semantic conventions. It represents the
+	// [HTTP response status
+	// code](https://tools.ietf.org/html/rfc7231#section-6).
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If and only if one was
+	// received/sent.)
+	// Stability: stable
+	// Examples: 200
+	HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+)
+
+var (
+	// CONNECT method
+	HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+	// DELETE method
+	HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+	// GET method
+	HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+	// HEAD method
+	HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+	// OPTIONS method
+	HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+	// PATCH method
+	HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+	// POST method
+	HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+	// PUT method
+	HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+	// TRACE method
+	HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+	// Any HTTP method that the instrumentation has no prior knowledge of
+	HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the [HTTP
+// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+	return HTTPResponseStatusCodeKey.Int(val)
+}
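
The known-methods rule above translates into instrumentation code roughly like this sketch; the method set and the raw `http.request.method_original` key are written out by hand here for illustration:

import (
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"

	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
)

var knownMethods = map[string]bool{
	"CONNECT": true, "DELETE": true, "GET": true, "HEAD": true, "OPTIONS": true,
	"PATCH": true, "POST": true, "PUT": true, "TRACE": true,
}

// recordMethod maps unknown methods to `_OTHER` and preserves the original.
func recordMethod(span trace.Span, method string) {
	if knownMethods[method] {
		span.SetAttributes(semconv.HTTPRequestMethodKey.String(method))
		return
	}
	span.SetAttributes(
		semconv.HTTPRequestMethodOther,
		attribute.String("http.request.method_original", method),
	)
}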
+
+// HTTP Server attributes
+const (
+	// HTTPRouteKey is the attribute Key conforming to the "http.route"
+	// semantic conventions. It represents the matched route (path template in
+	// the format used by the respective server framework). See note below
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If and only if it's available)
+	// Stability: stable
+	// Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+	// Note: MUST NOT be populated when this is not supported by the HTTP
+	// server framework as the route attribute should have low cardinality and
+	// the URI path can NOT substitute it.
+	// SHOULD include the [application
+	// root](/docs/http/http-spans.md#http-server-definitions) if there is one.
+	HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route (path template in the
+// format used by the respective server framework). See note below
+func HTTPRoute(val string) attribute.KeyValue {
+	return HTTPRouteKey.String(val)
+}
+
+// Attributes for Events represented using Log Records.
+const (
+	// EventNameKey is the attribute Key conforming to the "event.name"
+	// semantic conventions. It represents the name that identifies the event.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'click', 'exception'
+	EventNameKey = attribute.Key("event.name")
+
+	// EventDomainKey is the attribute Key conforming to the "event.domain"
+	// semantic conventions. It represents the domain that identifies the
+	// business context for the events.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	// Note: Events across different domains may have same `event.name`, yet be
+	// unrelated events.
+	EventDomainKey = attribute.Key("event.domain")
+)
+
+var (
+	// Events from browser apps
+	EventDomainBrowser = EventDomainKey.String("browser")
+	// Events from mobile apps
+	EventDomainDevice = EventDomainKey.String("device")
+	// Events from Kubernetes
+	EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It represents the name that identifies the event.
+func EventName(val string) attribute.KeyValue {
+	return EventNameKey.String(val)
+}
+
+// The attributes described in this section are rather generic. They may be
+// used in any Log Record they apply to.
+const (
+	// LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+	// semantic conventions. It represents a unique identifier for the Log
+	// Record.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+	// Note: If an id is provided, other log records with the same id will be
+	// considered duplicates and can be removed safely. This means that two
+	// distinguishable log records MUST have different values.
+	// The id MAY be a [Universally Unique Lexicographically Sortable
+	// Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+	// (e.g. UUID) may be used as needed.
+	LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+	return LogRecordUIDKey.String(val)
+}
+
+// Describes Log attributes
+const (
+	// LogIostreamKey is the attribute Key conforming to the "log.iostream"
+	// semantic conventions. It represents the stream associated with the log.
+	// See below for a list of well-known values.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	LogIostreamKey = attribute.Key("log.iostream")
+)
+
+var (
+	// Logs from stdout stream
+	LogIostreamStdout = LogIostreamKey.String("stdout")
+	// Logs from stderr stream
+	LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// A file to which log was emitted.
+const (
+	// LogFileNameKey is the attribute Key conforming to the "log.file.name"
+	// semantic conventions. It represents the basename of the file.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'audit.log'
+	LogFileNameKey = attribute.Key("log.file.name")
+
+	// LogFilePathKey is the attribute Key conforming to the "log.file.path"
+	// semantic conventions. It represents the full path to the file.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/var/log/mysql/audit.log'
+	LogFilePathKey = attribute.Key("log.file.path")
+
+	// LogFileNameResolvedKey is the attribute Key conforming to the
+	// "log.file.name_resolved" semantic conventions. It represents the
+	// basename of the file, with symlinks resolved.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'uuid.log'
+	LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
+
+	// LogFilePathResolvedKey is the attribute Key conforming to the
+	// "log.file.path_resolved" semantic conventions. It represents the full
+	// path to the file, with symlinks resolved.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/var/lib/docker/uuid.log'
+	LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the
+// "log.file.name" semantic conventions. It represents the basename of the
+// file.
+func LogFileName(val string) attribute.KeyValue {
+	return LogFileNameKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the
+// "log.file.path" semantic conventions. It represents the full path to the
+// file.
+func LogFilePath(val string) attribute.KeyValue {
+	return LogFilePathKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+	return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path
+// to the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+	return LogFilePathResolvedKey.String(val)
+}
+
+// Describes JVM memory metric attributes.
+const (
+	// TypeKey is the attribute Key conforming to the "type" semantic
+	// conventions. It represents the type of memory.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'heap', 'non_heap'
+	TypeKey = attribute.Key("type")
+
+	// PoolKey is the attribute Key conforming to the "pool" semantic
+	// conventions. It represents the name of the memory pool.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
+	// Note: Pool names are generally obtained via
+	// [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
+	PoolKey = attribute.Key("pool")
+)
+
+var (
+	// Heap memory
+	TypeHeap = TypeKey.String("heap")
+	// Non-heap memory
+	TypeNonHeap = TypeKey.String("non_heap")
+)
+
+// Pool returns an attribute KeyValue conforming to the "pool" semantic
+// conventions. It represents the name of the memory pool.
+func Pool(val string) attribute.KeyValue {
+	return PoolKey.String(val)
+}
+
+// These attributes may be used to describe the server in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API does not expose a
+// clear notion of client and server). This also covers UDP network
+// interactions where one side initiates the interaction, e.g. QUIC (HTTP/3)
+// and DNS.
+const (
+	// ServerAddressKey is the attribute Key conforming to the "server.address"
+	// semantic conventions. It represents the logical server hostname, matches
+	// server FQDN if available, and IP or socket address if FQDN is not known.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'example.com'
+	ServerAddressKey = attribute.Key("server.address")
+
+	// ServerPortKey is the attribute Key conforming to the "server.port"
+	// semantic conventions. It represents the logical server port number
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 80, 8080, 443
+	ServerPortKey = attribute.Key("server.port")
+
+	// ServerSocketDomainKey is the attribute Key conforming to the
+	// "server.socket.domain" semantic conventions. It represents the domain
+	// name of an immediate peer.
+	//
+	// Type: string
+	// RequirementLevel: Recommended (If different than `server.address`.)
+	// Stability: stable
+	// Examples: 'proxy.example.com'
+	// Note: Typically observed from the client side, and represents a proxy or
+	// other intermediary domain name.
+	ServerSocketDomainKey = attribute.Key("server.socket.domain")
+
+	// ServerSocketAddressKey is the attribute Key conforming to the
+	// "server.socket.address" semantic conventions. It represents the physical
+	// server IP address or Unix socket address. If set from the client, should
+	// simply use the socket's peer address, and not attempt to find any actual
+	// server IP (i.e., if set from client, this may represent some proxy
+	// server instead of the logical server).
+	//
+	// Type: string
+	// RequirementLevel: Recommended (If different than `server.address`.)
+	// Stability: stable
+	// Examples: '10.5.3.2'
+	ServerSocketAddressKey = attribute.Key("server.socket.address")
+
+	// ServerSocketPortKey is the attribute Key conforming to the
+	// "server.socket.port" semantic conventions. It represents the physical
+	// server port.
+	//
+	// Type: int
+	// RequirementLevel: Recommended (If different than `server.port`.)
+	// Stability: stable
+	// Examples: 16456
+	ServerSocketPortKey = attribute.Key("server.socket.port")
+)
+
+// ServerAddress returns an attribute KeyValue conforming to the
+// "server.address" semantic conventions. It represents the logical server
+// hostname, matches server FQDN if available, and IP or socket address if FQDN
+// is not known.
+func ServerAddress(val string) attribute.KeyValue {
+	return ServerAddressKey.String(val)
+}
+
+// ServerPort returns an attribute KeyValue conforming to the "server.port"
+// semantic conventions. It represents the logical server port number
+func ServerPort(val int) attribute.KeyValue {
+	return ServerPortKey.Int(val)
+}
+
+// ServerSocketDomain returns an attribute KeyValue conforming to the
+// "server.socket.domain" semantic conventions. It represents the domain name
+// of an immediate peer.
+func ServerSocketDomain(val string) attribute.KeyValue {
+	return ServerSocketDomainKey.String(val)
+}
+
+// ServerSocketAddress returns an attribute KeyValue conforming to the
+// "server.socket.address" semantic conventions. It represents the physical
+// server IP address or Unix socket address. If set from the client, should
+// simply use the socket's peer address, and not attempt to find any actual
+// server IP (i.e., if set from client, this may represent some proxy server
+// instead of the logical server).
+func ServerSocketAddress(val string) attribute.KeyValue {
+	return ServerSocketAddressKey.String(val)
+}
+
+// ServerSocketPort returns an attribute KeyValue conforming to the
+// "server.socket.port" semantic conventions. It represents the physical server
+// port.
+func ServerSocketPort(val int) attribute.KeyValue {
+	return ServerSocketPortKey.Int(val)
+}
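+
+// Illustrative sketch, not part of the generated conventions: attaching the
+// server attributes to a client span, assuming a trace.Tracer from
+// go.opentelemetry.io/otel/trace and this package imported as semconv:
+//
+//	ctx, span := tracer.Start(ctx, "connect",
+//		trace.WithSpanKind(trace.SpanKindClient),
+//		trace.WithAttributes(
+//			semconv.ServerAddress("example.com"),
+//			semconv.ServerPort(443),
+//			semconv.ServerSocketAddress("10.5.3.2"),
+//		))
+//	defer span.End()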
+
+// These attributes may be used to describe the sender of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API does not expose a clear notion
+// of client and server.
+const (
+	// SourceDomainKey is the attribute Key conforming to the "source.domain"
+	// semantic conventions. It represents the domain name of the source
+	// system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'foo.example.com'
+	// Note: This value may be a host name, a fully qualified domain name, or
+	// another host naming format.
+	SourceDomainKey = attribute.Key("source.domain")
+
+	// SourceAddressKey is the attribute Key conforming to the "source.address"
+	// semantic conventions. It represents the source address, for example IP
+	// address or Unix socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.5.3.2'
+	SourceAddressKey = attribute.Key("source.address")
+
+	// SourcePortKey is the attribute Key conforming to the "source.port"
+	// semantic conventions. It represents the source port number
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 3389, 2888
+	SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceDomain returns an attribute KeyValue conforming to the
+// "source.domain" semantic conventions. It represents the domain name of the
+// source system.
+func SourceDomain(val string) attribute.KeyValue {
+	return SourceDomainKey.String(val)
+}
+
+// SourceAddress returns an attribute KeyValue conforming to the
+// "source.address" semantic conventions. It represents the source address, for
+// example IP address or Unix socket name.
+func SourceAddress(val string) attribute.KeyValue {
+	return SourceAddressKey.String(val)
+}
+
+// SourcePort returns an attribute KeyValue conforming to the "source.port"
+// semantic conventions. It represents the source port number
+func SourcePort(val int) attribute.KeyValue {
+	return SourcePortKey.Int(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+	// NetworkTransportKey is the attribute Key conforming to the
+	// "network.transport" semantic conventions. It represents the [OSI
+	// Transport Layer](https://osi-model.com/transport-layer/) or
+	// [Inter-process Communication
+	// method](https://en.wikipedia.org/wiki/Inter-process_communication). The
+	// value SHOULD be normalized to lowercase.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'tcp', 'udp'
+	NetworkTransportKey = attribute.Key("network.transport")
+
+	// NetworkTypeKey is the attribute Key conforming to the "network.type"
+	// semantic conventions. It represents the [OSI Network
+	// Layer](https://osi-model.com/network-layer/) or non-OSI equivalent. The
+	// value SHOULD be normalized to lowercase.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'ipv4', 'ipv6'
+	NetworkTypeKey = attribute.Key("network.type")
+
+	// NetworkProtocolNameKey is the attribute Key conforming to the
+	// "network.protocol.name" semantic conventions. It represents the [OSI
+	// Application Layer](https://osi-model.com/application-layer/) or non-OSI
+	// equivalent. The value SHOULD be normalized to lowercase.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'amqp', 'http', 'mqtt'
+	NetworkProtocolNameKey = attribute.Key("network.protocol.name")
+
+	// NetworkProtocolVersionKey is the attribute Key conforming to the
+	// "network.protocol.version" semantic conventions. It represents the
+	// version of the application layer protocol used. See note below.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '3.1.1'
+	// Note: `network.protocol.version` refers to the version of the protocol
+	// used and might be different from the protocol client's version. If the
+	// HTTP client used has a version of `0.27.2`, but sends HTTP version
+	// `1.1`, this attribute should be set to `1.1`.
+	NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
+)
+
+var (
+	// TCP
+	NetworkTransportTCP = NetworkTransportKey.String("tcp")
+	// UDP
+	NetworkTransportUDP = NetworkTransportKey.String("udp")
+	// Named or anonymous pipe. See note below
+	NetworkTransportPipe = NetworkTransportKey.String("pipe")
+	// Unix domain socket
+	NetworkTransportUnix = NetworkTransportKey.String("unix")
+)
+
+var (
+	// IPv4
+	NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
+	// IPv6
+	NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
+)
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the [OSI
+// Application Layer](https://osi-model.com/application-layer/) or non-OSI
+// equivalent. The value SHOULD be normalized to lowercase.
+func NetworkProtocolName(val string) attribute.KeyValue {
+	return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the version
+// of the application layer protocol used. See note below.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+	return NetworkProtocolVersionKey.String(val)
+}
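+
+// Illustrative sketch, not part of the generated conventions: combining the
+// enum variables and helper functions above on an already started trace.Span,
+// with this package imported as semconv:
+//
+//	span.SetAttributes(
+//		semconv.NetworkTransportTCP,
+//		semconv.NetworkTypeIpv4,
+//		semconv.NetworkProtocolName("http"),
+//		semconv.NetworkProtocolVersion("1.1"),
+//	)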
+
+// These attributes may be used for any network related operation.
+const (
+	// NetworkConnectionTypeKey is the attribute Key conforming to the
+	// "network.connection.type" semantic conventions. It represents the
+	// internet connection type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'wifi'
+	NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
+	// "network.connection.subtype" semantic conventions. It represents the
+	// this describes more details regarding the connection.type. It may be the
+	// type of cell technology connection, but it could be used for describing
+	// details about a wifi connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'LTE'
+	NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+	// NetworkCarrierNameKey is the attribute Key conforming to the
+	// "network.carrier.name" semantic conventions. It represents the name of
+	// the mobile carrier.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'sprint'
+	NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+	// NetworkCarrierMccKey is the attribute Key conforming to the
+	// "network.carrier.mcc" semantic conventions. It represents the mobile
+	// carrier country code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '310'
+	NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
+
+	// NetworkCarrierMncKey is the attribute Key conforming to the
+	// "network.carrier.mnc" semantic conventions. It represents the mobile
+	// carrier network code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '001'
+	NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+	// NetworkCarrierIccKey is the attribute Key conforming to the
+	// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+	// alpha-2 2-character country code associated with the mobile carrier
+	// network.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'DE'
+	NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
+)
+
+var (
+	// wifi
+	NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+	// wired
+	NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+	// cell
+	NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+	// unavailable
+	NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+	// unknown
+	NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+var (
+	// GPRS
+	NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
+	// EDGE
+	NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
+	// UMTS
+	NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
+	// CDMA
+	NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
+	// EVDO Rel. 0
+	NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
+	// EVDO Rev. A
+	NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
+	// CDMA2000 1XRTT
+	NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
+	// HSDPA
+	NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
+	// HSUPA
+	NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
+	// HSPA
+	NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
+	// IDEN
+	NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
+	// EVDO Rev. B
+	NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+	// LTE
+	NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+	// EHRPD
+	NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+	// HSPAP
+	NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+	// GSM
+	NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+	// TD-SCDMA
+	NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+	// IWLAN
+	NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+	// 5G NR (New Radio)
+	NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+	// 5G NRNSA (New Radio Non-Standalone)
+	NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+	// LTE CA
+	NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+// NetworkCarrierName returns an attribute KeyValue conforming to the
+// "network.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetworkCarrierName(val string) attribute.KeyValue {
+	return NetworkCarrierNameKey.String(val)
+}
+
+// NetworkCarrierMcc returns an attribute KeyValue conforming to the
+// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+// country code.
+func NetworkCarrierMcc(val string) attribute.KeyValue {
+	return NetworkCarrierMccKey.String(val)
+}
+
+// NetworkCarrierMnc returns an attribute KeyValue conforming to the
+// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+// network code.
+func NetworkCarrierMnc(val string) attribute.KeyValue {
+	return NetworkCarrierMncKey.String(val)
+}
+
+// NetworkCarrierIcc returns an attribute KeyValue conforming to the
+// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetworkCarrierIcc(val string) attribute.KeyValue {
+	return NetworkCarrierIccKey.String(val)
+}
+
+// Semantic conventions for HTTP client and server Spans.
+const (
+	// HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+	// "http.request.method_original" semantic conventions. It represents the
+	// original HTTP method sent by the client in the request line.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If and only if it's different
+	// than `http.request.method`.)
+	// Stability: stable
+	// Examples: 'GeT', 'ACL', 'foo'
+	HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+	// HTTPRequestBodySizeKey is the attribute Key conforming to the
+	// "http.request.body.size" semantic conventions. It represents the size of
+	// the request payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as
+	// the
+	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+	// header. For requests using transport encoding, this should be the
+	// compressed size.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 3495
+	HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+	// HTTPResponseBodySizeKey is the attribute Key conforming to the
+	// "http.response.body.size" semantic conventions. It represents the size
+	// of the response payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as
+	// the
+	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+	// header. For responses using transport encoding, this should be the
+	// compressed size.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 3495
+	HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+)
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+	return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+	return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of
+// the response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For responses using transport encoding, this should be the compressed
+// size.
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+	return HTTPResponseBodySizeKey.Int(val)
+}
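+
+// Illustrative sketch, not part of the generated conventions: recording body
+// sizes in an HTTP server handler, assuming an *http.Request r, a started
+// trace.Span, and a hypothetical bytesWritten counter for the response:
+//
+//	if r.ContentLength >= 0 { // -1 means the length is unknown
+//		span.SetAttributes(semconv.HTTPRequestBodySize(int(r.ContentLength)))
+//	}
+//	span.SetAttributes(semconv.HTTPResponseBodySize(bytesWritten))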
+
+// Semantic convention describing per-message attributes populated on messaging
+// spans or links.
+const (
+	// MessagingMessageIDKey is the attribute Key conforming to the
+	// "messaging.message.id" semantic conventions. It represents a value used
+	// by the messaging system as an identifier for the message, represented as
+	// a string.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+	MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+	// MessagingMessageConversationIDKey is the attribute Key conforming to the
+	// "messaging.message.conversation_id" semantic conventions. It represents
+	// the [conversation ID](#conversations) identifying the conversation to
+	// which the message belongs, represented as a string. Sometimes called
+	// "Correlation ID".
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'MyConversationID'
+	MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+	// MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to
+	// the "messaging.message.payload_size_bytes" semantic conventions. It
+	// represents the (uncompressed) size of the message payload in bytes. Also
+	// use this attribute if it is unknown whether the compressed or
+	// uncompressed payload size is reported.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 2738
+	MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes")
+
+	// MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key
+	// conforming to the "messaging.message.payload_compressed_size_bytes"
+	// semantic conventions. It represents the compressed size of the message
+	// payload in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 2048
+	MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes")
+)
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+	return MessagingMessageIDKey.String(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming
+// to the "messaging.message.conversation_id" semantic conventions. It
+// represents the [conversation ID](#conversations) identifying the
+// conversation to which the message belongs, represented as a string.
+// Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+	return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming
+// to the "messaging.message.payload_size_bytes" semantic conventions. It
+// represents the (uncompressed) size of the message payload in bytes. Also use
+// this attribute if it is unknown whether the compressed or uncompressed
+// payload size is reported.
+func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue {
+	return MessagingMessagePayloadSizeBytesKey.Int(val)
+}
+
+// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue
+// conforming to the "messaging.message.payload_compressed_size_bytes" semantic
+// conventions. It represents the compressed size of the message payload in
+// bytes.
+func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue {
+	return MessagingMessagePayloadCompressedSizeBytesKey.Int(val)
+}
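+
+// Illustrative sketch, not part of the generated conventions: annotating a
+// producer span with per-message attributes, assuming a started trace.Span, a
+// hypothetical msgID string, a payload []byte, and this package imported as
+// semconv:
+//
+//	span.SetAttributes(
+//		semconv.MessagingMessageID(msgID),
+//		semconv.MessagingMessagePayloadSizeBytes(len(payload)),
+//	)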
+
+// Semantic convention for attributes that describe messaging destination on
+// broker
+const (
+	// MessagingDestinationNameKey is the attribute Key conforming to the
+	// "messaging.destination.name" semantic conventions. It represents the
+	// message destination name
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'MyQueue', 'MyTopic'
+	// Note: Destination name SHOULD uniquely identify a specific queue, topic
+	// or other entity within the broker. If
+	// the broker does not have such notion, the destination name SHOULD
+	// uniquely identify the broker.
+	MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+	// MessagingDestinationTemplateKey is the attribute Key conforming to the
+	// "messaging.destination.template" semantic conventions. It represents the
+	// low cardinality representation of the messaging destination name
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/customers/{customerID}'
+	// Note: Destination names could be constructed from templates. An example
+	// would be a destination name involving a user name or product id.
+	// Although the destination name in this case is of high cardinality, the
+	// underlying template is of low cardinality and can be effectively used
+	// for grouping and aggregation.
+	MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+	// MessagingDestinationTemporaryKey is the attribute Key conforming to the
+	// "messaging.destination.temporary" semantic conventions. It represents a
+	// boolean that is true if the message destination is temporary and might
+	// not exist anymore after messages are processed.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+	// MessagingDestinationAnonymousKey is the attribute Key conforming to the
+	// "messaging.destination.anonymous" semantic conventions. It represents a
+	// boolean that is true if the message destination is anonymous (could be
+	// unnamed or have auto-generated name).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+)
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name
+func MessagingDestinationName(val string) attribute.KeyValue {
+	return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low cardinality representation of the messaging destination name
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+	return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+	return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+	return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// Attributes for RabbitMQ
+const (
+	// MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+	// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+	// conventions. It represents the RabbitMQ message routing key.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If not empty.)
+	// Stability: stable
+	// Examples: 'myKey'
+	MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+	return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// Attributes for Apache Kafka
+const (
+	// MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+	// "messaging.kafka.message.key" semantic conventions. It represents the
+	// message keys in Kafka are used for grouping alike messages to ensure
+	// they're processed on the same partition. They differ from
+	// `messaging.message.id` in that they're not unique. If the key is `null`,
+	// the attribute MUST NOT be set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'myKey'
+	// Note: If the key type is not string, its string representation has to
+	// be supplied for the attribute. If the key has no unambiguous, canonical
+	// string form, don't include its value.
+	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+	// MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+	// "messaging.kafka.consumer.group" semantic conventions. It represents the
+	// name of the Kafka Consumer Group that is handling the message. Only
+	// applies to consumers, not producers.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'my-group'
+	MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+	// MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+	// the "messaging.kafka.destination.partition" semantic conventions. It
+	// represents the partition the message is sent to.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 2
+	MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+	// MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+	// "messaging.kafka.message.offset" semantic conventions. It represents the
+	// offset of a record in the corresponding Kafka partition.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 42
+	MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+	// MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+	// "messaging.kafka.message.tombstone" semantic conventions. It represents
+	// a boolean that is true if the message is a tombstone.
+	//
+	// Type: boolean
+	// RequirementLevel: ConditionallyRequired (If value is `true`. When
+	// missing, the value is assumed to be `false`.)
+	// Stability: stable
+	MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka are used for grouping alike messages to ensure they're
+// processed on the same partition. They differ from `messaging.message.id` in
+// that they're not unique. If the key is `null`, the attribute MUST NOT be
+// set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+	return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+	return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+	return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+	return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+	return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
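+
+// Illustrative sketch, not part of the generated conventions: a Kafka
+// consumer span, assuming a trace.Tracer, a message msg with Key, Partition
+// and Offset fields (e.g. a segmentio/kafka-go Message), and this package
+// imported as semconv:
+//
+//	ctx, span := tracer.Start(ctx, "orders process",
+//		trace.WithSpanKind(trace.SpanKindConsumer),
+//		trace.WithAttributes(
+//			semconv.MessagingKafkaMessageKey(string(msg.Key)),
+//			semconv.MessagingKafkaConsumerGroup("my-group"),
+//			semconv.MessagingKafkaDestinationPartition(msg.Partition),
+//			semconv.MessagingKafkaMessageOffset(int(msg.Offset)),
+//		))
+//	defer span.End()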
+
+// Attributes for Apache RocketMQ
+const (
+	// MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+	// "messaging.rocketmq.namespace" semantic conventions. It represents the
+	// namespace of RocketMQ resources, resources in different namespaces are
+	// individual.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'myNamespace'
+	MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+	// MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.client_group" semantic conventions. It represents
+	// the name of the RocketMQ producer/consumer group that is handling the
+	// message. The client type is identified by the SpanKind.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'myConsumerGroup'
+	MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+	// MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delivery_timestamp"
+	// semantic conventions. It represents the timestamp in milliseconds that
+	// the delay message is expected to be delivered to the consumer.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If the message type is delay
+	// and delay time level is not specified.)
+	// Stability: stable
+	// Examples: 1665987217045
+	MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+	// MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+	// conventions. It represents the delay time level for a delay message,
+	// which determines the message delay time.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If the message type is delay
+	// and delivery timestamp is not specified.)
+	// Stability: stable
+	// Examples: 3
+	MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+	// MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.group" semantic conventions. It represents
+	// the it is essential for FIFO message. Messages that belong to the same
+	// message group are always processed one by one within the same consumer
+	// group.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+	// Stability: stable
+	// Examples: 'myMessageGroup'
+	MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+	// MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.type" semantic conventions. It represents
+	// the type of message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+	// MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+	// secondary classifier of message besides topic.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'tagA'
+	MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+	// MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.keys" semantic conventions. It represents
+	// the key(s) of the message, another way to mark a message besides the
+	// message ID.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'keyA', 'keyB'
+	MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+	// MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+	// the "messaging.rocketmq.consumption_model" semantic conventions. It
+	// represents the model of message consumption. This only applies to
+	// consumer spans.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+	// Normal message
+	MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+	// FIFO message
+	MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+	// Delay message
+	MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+	// Transaction message
+	MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+	// Clustering consumption model
+	MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+	// Broadcasting consumption model
+	MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources, resources in different namespaces are
+// individual.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+	return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within the
+// same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark a message besides the
+// message ID.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+	return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// Attributes describing URL.
+const (
+	// URLSchemeKey is the attribute Key conforming to the "url.scheme"
+	// semantic conventions. It represents the [URI
+	// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+	// identifying the used protocol.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'https', 'ftp', 'telnet'
+	URLSchemeKey = attribute.Key("url.scheme")
+
+	// URLFullKey is the attribute Key conforming to the "url.full" semantic
+	// conventions. It represents the absolute URL describing a network
+	// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+	// '//localhost'
+	// Note: For network calls, URL usually has
+	// `scheme://host[:port][path][?query][#fragment]` format, where the
+	// fragment is not transmitted over HTTP, but if it is known, it should be
+	// included nevertheless.
+	// `url.full` MUST NOT contain credentials passed via URL in form of
+	// `https://username:password@www.example.com/`. In such case username and
+	// password should be redacted and attribute's value should be
+	// `https://REDACTED:REDACTED@www.example.com/`.
+	// `url.full` SHOULD capture the absolute URL when it is available (or can
+	// be reconstructed) and SHOULD NOT be validated or modified except for
+	// sanitizing purposes.
+	URLFullKey = attribute.Key("url.full")
+
+	// URLPathKey is the attribute Key conforming to the "url.path" semantic
+	// conventions. It represents the [URI
+	// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/search'
+	// Note: When missing, the value is assumed to be `/`
+	URLPathKey = attribute.Key("url.path")
+
+	// URLQueryKey is the attribute Key conforming to the "url.query" semantic
+	// conventions. It represents the [URI
+	// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'q=OpenTelemetry'
+	// Note: Sensitive content provided in query string SHOULD be scrubbed when
+	// instrumentations can identify it.
+	URLQueryKey = attribute.Key("url.query")
+
+	// URLFragmentKey is the attribute Key conforming to the "url.fragment"
+	// semantic conventions. It represents the [URI
+	// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'SemConv'
+	URLFragmentKey = attribute.Key("url.fragment")
+)
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI
+// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+// identifying the used protocol.
+func URLScheme(val string) attribute.KeyValue {
+	return URLSchemeKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full"
+// semantic conventions. It represents the absolute URL describing a network
+// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+func URLFull(val string) attribute.KeyValue {
+	return URLFullKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path"
+// semantic conventions. It represents the [URI
+// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+func URLPath(val string) attribute.KeyValue {
+	return URLPathKey.String(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query"
+// semantic conventions. It represents the [URI
+// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+func URLQuery(val string) attribute.KeyValue {
+	return URLQueryKey.String(val)
+}
+
+// URLFragment returns an attribute KeyValue conforming to the
+// "url.fragment" semantic conventions. It represents the [URI
+// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+func URLFragment(val string) attribute.KeyValue {
+	return URLFragmentKey.String(val)
+}
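+
+// Illustrative sketch, not part of the generated conventions: honoring the
+// credential-redaction rule for `url.full` before setting the attribute,
+// assuming a *url.URL u from net/url and a started trace.Span:
+//
+//	if u.User != nil {
+//		u.User = url.UserPassword("REDACTED", "REDACTED")
+//	}
+//	span.SetAttributes(semconv.URLFull(u.String()))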
+
+// Describes user-agent attributes.
+const (
+	// UserAgentOriginalKey is the attribute Key conforming to the
+	// "user_agent.original" semantic conventions. It represents the value of
+	// the [HTTP
+	// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+	// header sent by the client.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+	UserAgentOriginalKey = attribute.Key("user_agent.original")
+)
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+	return UserAgentOriginalKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..0318b5ec48fa2a65037a67db60fab43426ae5b81
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the v1.21.0
+// version of the OpenTelemetry semantic conventions.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go
new file mode 100644
index 0000000000000000000000000000000000000000..30ae34fe478211e88c0d47282a4a99064a78a6fb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go
@@ -0,0 +1,199 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+	// FeatureFlagKeyKey is the attribute Key conforming to the
+	// "feature_flag.key" semantic conventions. It represents the unique
+	// identifier of the feature flag.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'logo-color'
+	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+	// FeatureFlagProviderNameKey is the attribute Key conforming to the
+	// "feature_flag.provider_name" semantic conventions. It represents the
+	// name of the service provider that performs the flag evaluation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'Flag Manager'
+	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+	// FeatureFlagVariantKey is the attribute Key conforming to the
+	// "feature_flag.variant" semantic conventions. It represents the sHOULD be
+	// a semantic identifier for a value. If one is unavailable, a stringified
+	// version of the value can be used.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'red', 'true', 'on'
+	// Note: A semantic identifier, commonly referred to as a variant, provides
+	// a means
+	// for referring to a value without including the value itself. This can
+	// provide additional context for understanding the meaning behind a value.
+	// For example, the variant `red` may be used for the value `#c05543`.
+	//
+	// A stringified version of the value can be used in situations where a
+	// semantic identifier is unavailable. String representation of the value
+	// should be determined by the implementer.
+	FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+	return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+	return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+	return FeatureFlagVariantKey.String(val)
+}
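+
+// Illustrative sketch, not part of the generated conventions: recording a
+// feature flag evaluation as a span event, assuming a started trace.Span and
+// this package imported as semconv:
+//
+//	span.AddEvent("feature_flag",
+//		trace.WithAttributes(
+//			semconv.FeatureFlagKey("logo-color"),
+//			semconv.FeatureFlagProviderName("Flag Manager"),
+//			semconv.FeatureFlagVariant("red"),
+//		))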
+
+// RPC received/sent message.
+const (
+	// MessageTypeKey is the attribute Key conforming to the "message.type"
+	// semantic conventions. It represents whether this is a received or sent
+	// message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageTypeKey = attribute.Key("message.type")
+
+	// MessageIDKey is the attribute Key conforming to the "message.id"
+	// semantic conventions. It represents the message ID, which MUST be
+	// calculated as two different counters starting from `1`, one for sent
+	// messages and one for received messages.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: This way we guarantee that the values will be consistent between
+	// different implementations.
+	MessageIDKey = attribute.Key("message.id")
+
+	// MessageCompressedSizeKey is the attribute Key conforming to the
+	// "message.compressed_size" semantic conventions. It represents the
+	// compressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+	// MessageUncompressedSizeKey is the attribute Key conforming to the
+	// "message.uncompressed_size" semantic conventions. It represents the
+	// uncompressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+	// sent
+	MessageTypeSent = MessageTypeKey.String("SENT")
+	// received
+	MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It represents the message ID, which MUST be
+// calculated as two different counters starting from `1`, one for sent
+// messages and one for received messages.
+func MessageID(val int) attribute.KeyValue {
+	return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+	return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+	return MessageUncompressedSizeKey.Int(val)
+}
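+
+// Illustrative sketch, not part of the generated conventions: the per-span
+// counter described for "message.id", assuming a started trace.Span in a
+// streaming RPC and a hypothetical sent counter:
+//
+//	sent++ // one counter for sent and one for received, both starting at 1
+//	span.AddEvent("message",
+//		trace.WithAttributes(
+//			semconv.MessageTypeSent,
+//			semconv.MessageID(sent),
+//		))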
+
+// The attributes used to report a single exception associated with a span.
+const (
+	// ExceptionEscapedKey is the attribute Key conforming to the
+	// "exception.escaped" semantic conventions. It represents the sHOULD be
+	// set to true if the exception event is recorded at a point where it is
+	// known that the exception is escaping the scope of the span.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: An exception is considered to have escaped (or left) the scope of
+	// a span if that span is ended while the exception is still logically "in
+	// flight". The exception may actually still be "in flight" in some
+	// languages (e.g. if the exception is passed to a Context manager's
+	// `__exit__` method in Python), but will usually be caught at the point of
+	// recording the exception in most languages.
+	//
+	// It is usually not possible to determine, at the point where an exception
+	// is thrown, whether it will escape the scope of a span. However, it is
+	// trivial to know that an exception will escape if one checks for an
+	// active exception just before ending the span, as done in the
+	// [example above](#recording-an-exception).
+	//
+	// It follows that an exception may still escape the scope of the span
+	// even if the `exception.escaped` attribute was not set or set to false,
+	// since the event might have been recorded at a time where it was not
+	// clear whether the exception will escape.
+	ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It represents the sHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+	return ExceptionEscapedKey.Bool(val)
+}
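+
+// Illustrative sketch, not part of the generated conventions: recording an
+// escaping exception just before ending the span, assuming an error err, a
+// started trace.Span, and the go.opentelemetry.io/otel/codes package:
+//
+//	span.RecordError(err,
+//		trace.WithAttributes(semconv.ExceptionEscaped(true)))
+//	span.SetStatus(codes.Error, err.Error())
+//	span.End()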
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go
new file mode 100644
index 0000000000000000000000000000000000000000..93d3c1760c92f40981690cbbc6e3890aa86ca6ca
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+const (
+	// ExceptionEventName is the name of the Span event representing an exception.
+	ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go
new file mode 100644
index 0000000000000000000000000000000000000000..b6d8935cf9736bc992d4a2bfc66b3580fc7f4724
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go
@@ -0,0 +1,2310 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The web browser in which the application represented by the resource is
+// running. The `browser.*` attributes MUST be used only for resources that
+// represent applications running in a web browser (regardless of whether
+// running on a mobile or desktop device).
+const (
+	// BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+	// semantic conventions. It represents the array of brand name and version
+	// separated by a space
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+	// Note: This value is intended to be taken from the [UA client hints
+	// API](https://wicg.github.io/ua-client-hints/#interface)
+	// (`navigator.userAgentData.brands`).
+	BrowserBrandsKey = attribute.Key("browser.brands")
+
+	// BrowserPlatformKey is the attribute Key conforming to the
+	// "browser.platform" semantic conventions. It represents the platform on
+	// which the browser is running
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Windows', 'macOS', 'Android'
+	// Note: This value is intended to be taken from the [UA client hints
+	// API](https://wicg.github.io/ua-client-hints/#interface)
+	// (`navigator.userAgentData.platform`). If unavailable, the legacy
+	// `navigator.platform` API SHOULD NOT be used instead and this attribute
+	// SHOULD be left unset in order for the values to be consistent.
+	// The list of possible values is defined in the [W3C User-Agent Client
+	// Hints
+	// specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+	// Note that some (but not all) of these values can overlap with values in
+	// the [`os.type` and `os.name` attributes](./os.md). However, for
+	// consistency, the values in the `browser.platform` attribute should
+	// capture the exact value that the user agent provides.
+	BrowserPlatformKey = attribute.Key("browser.platform")
+
+	// BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+	// semantic conventions. It represents a boolean that is true if the
+	// browser is running on a mobile device
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: This value is intended to be taken from the [UA client hints
+	// API](https://wicg.github.io/ua-client-hints/#interface)
+	// (`navigator.userAgentData.mobile`). If unavailable, this attribute
+	// SHOULD be left unset.
+	BrowserMobileKey = attribute.Key("browser.mobile")
+
+	// BrowserLanguageKey is the attribute Key conforming to the
+	// "browser.language" semantic conventions. It represents the preferred
+	// language of the user using the browser
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'en', 'en-US', 'fr', 'fr-FR'
+	// Note: This value is intended to be taken from the Navigator API
+	// `navigator.language`.
+	BrowserLanguageKey = attribute.Key("browser.language")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space
+func BrowserBrands(val ...string) attribute.KeyValue {
+	return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+	return BrowserPlatformKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+	return BrowserMobileKey.Bool(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+	return BrowserLanguageKey.String(val)
+}
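+
+// browserResourceAttributesExample is a hand-written usage sketch (not
+// generated from the semantic convention specification) showing how the
+// browser.* constructors above might be combined; the literal values are
+// illustrative placeholders only.
+func browserResourceAttributesExample() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		BrowserBrands("Chromium 99", "Chrome 99"),
+		BrowserPlatform("macOS"),
+		BrowserMobile(false),
+		BrowserLanguage("en-US"),
+	}
+}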
+
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+	// CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+	// semantic conventions. It represents the name of the cloud provider.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	CloudProviderKey = attribute.Key("cloud.provider")
+
+	// CloudAccountIDKey is the attribute Key conforming to the
+	// "cloud.account.id" semantic conventions. It represents the cloud account
+	// ID the resource is assigned to.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '111111111111', 'opentelemetry'
+	CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+	// CloudRegionKey is the attribute Key conforming to the "cloud.region"
+	// semantic conventions. It represents the geographical region the resource
+	// is running.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'us-central1', 'us-east-1'
+	// Note: Refer to your provider's docs to see the available regions, for
+	// example [Alibaba Cloud
+	// regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+	// regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+	// [Azure
+	// regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
+	// [Google Cloud regions](https://cloud.google.com/about/locations), or
+	// [Tencent Cloud
+	// regions](https://www.tencentcloud.com/document/product/213/6091).
+	CloudRegionKey = attribute.Key("cloud.region")
+
+	// CloudResourceIDKey is the attribute Key conforming to the
+	// "cloud.resource_id" semantic conventions. It represents the cloud
+	// provider-specific native identifier of the monitored cloud resource
+	// (e.g. an
+	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+	// on AWS, a [fully qualified resource
+	// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+	// on Azure, a [full resource
+	// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+	// on GCP)
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+	// '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+	// '/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>'
+	// Note: On some cloud providers, it may not be possible to determine the
+	// full ID at startup,
+	// so it may be necessary to set `cloud.resource_id` as a span attribute
+	// instead.
+	//
+	// The exact value to use for `cloud.resource_id` depends on the cloud
+	// provider.
+	// The following well-known definitions MUST be used if you set this
+	// attribute and they apply:
+	//
+	// * **AWS Lambda:** The function
+	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+	//   Take care not to use the "invoked ARN" directly but replace any
+	//   [alias
+	// suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+	//   with the resolved function version, as the same runtime instance may
+	// be invokable with
+	//   multiple different aliases.
+	// * **GCP:** The [URI of the
+	// resource](https://cloud.google.com/iam/docs/full-resource-names)
+	// * **Azure:** The [Fully Qualified Resource
+	// ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+	// of the invoked function,
+	//   *not* the function app, having the form
+	// `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+	//   This means that a span attribute MUST be used, as an Azure function
+	// app can host multiple functions that would usually share
+	//   a TracerProvider.
+	CloudResourceIDKey = attribute.Key("cloud.resource_id")
+
+	// CloudAvailabilityZoneKey is the attribute Key conforming to the
+	// "cloud.availability_zone" semantic conventions. It represents the cloud
+	// regions often have multiple, isolated locations known as zones to
+	// increase availability. Availability zone represents the zone where the
+	// resource is running.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'us-east-1c'
+	// Note: Availability zones are called "zones" on Alibaba Cloud and Google
+	// Cloud.
+	CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+	// CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+	// semantic conventions. It represents the cloud platform in use.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: The prefix of the service SHOULD match the one specified in
+	// `cloud.provider`.
+	CloudPlatformKey = attribute.Key("cloud.platform")
+)
+
+var (
+	// Alibaba Cloud
+	CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	CloudProviderAWS = CloudProviderKey.String("aws")
+	// Microsoft Azure
+	CloudProviderAzure = CloudProviderKey.String("azure")
+	// Google Cloud Platform
+	CloudProviderGCP = CloudProviderKey.String("gcp")
+	// Heroku Platform as a Service
+	CloudProviderHeroku = CloudProviderKey.String("heroku")
+	// IBM Cloud
+	CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+	// Tencent Cloud
+	CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+var (
+	// Alibaba Cloud Elastic Compute Service
+	CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+	// Alibaba Cloud Function Compute
+	CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+	// Red Hat OpenShift on Alibaba Cloud
+	CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+	// AWS Elastic Compute Cloud
+	CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+	// AWS Elastic Container Service
+	CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+	// AWS Elastic Kubernetes Service
+	CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+	// AWS Lambda
+	CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+	// AWS Elastic Beanstalk
+	CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+	// AWS App Runner
+	CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+	// Red Hat OpenShift on AWS (ROSA)
+	CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+	// Azure Virtual Machines
+	CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+	// Azure Container Instances
+	CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+	// Azure Kubernetes Service
+	CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+	// Azure Functions
+	CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+	// Azure App Service
+	CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+	// Azure Red Hat OpenShift
+	CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+	// Google Bare Metal Solution (BMS)
+	CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
+	// Google Cloud Compute Engine (GCE)
+	CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+	// Google Cloud Run
+	CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+	// Google Cloud Kubernetes Engine (GKE)
+	CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+	// Google Cloud Functions (GCF)
+	CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+	// Google Cloud App Engine (GAE)
+	CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+	// Red Hat OpenShift on Google Cloud
+	CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
+	// Red Hat OpenShift on IBM Cloud
+	CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+	// Tencent Cloud Cloud Virtual Machine (CVM)
+	CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+	// Tencent Cloud Elastic Kubernetes Service (EKS)
+	CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+	// Tencent Cloud Serverless Cloud Function (SCF)
+	CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+	return CloudAccountIDKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+	return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+// on Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+	return CloudResourceIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the cloud
+// regions often have multiple, isolated locations known as zones to increase
+// availability. Availability zone represents the zone where the resource is
+// running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+	return CloudAvailabilityZoneKey.String(val)
+}
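+
+// cloudResourceAttributesExample is a hand-written usage sketch (not
+// generated from the semantic convention specification) combining the
+// cloud.* constructors and enum values above; the account, region, zone, and
+// ARN values are illustrative placeholders taken from the documented
+// examples.
+func cloudResourceAttributesExample() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		CloudProviderAWS,
+		CloudPlatformAWSLambda,
+		CloudAccountID("111111111111"),
+		CloudRegion("us-east-1"),
+		CloudAvailabilityZone("us-east-1c"),
+		CloudResourceID("arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function"),
+	}
+}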
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+	// AWSECSContainerARNKey is the attribute Key conforming to the
+	// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+	// Resource Name (ARN) of an [ECS container
+	// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples:
+	// 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+	AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+	// AWSECSClusterARNKey is the attribute Key conforming to the
+	// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+	// [ECS
+	// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+	AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+	// AWSECSLaunchtypeKey is the attribute Key conforming to the
+	// "aws.ecs.launchtype" semantic conventions. It represents the [launch
+	// type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+	// for an ECS task.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+	// AWSECSTaskARNKey is the attribute Key conforming to the
+	// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
+	// [ECS task
+	// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples:
+	// 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+	AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+	// AWSECSTaskFamilyKey is the attribute Key conforming to the
+	// "aws.ecs.task.family" semantic conventions. It represents the task
+	// definition family this task definition is a member of.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry-family'
+	AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+	// AWSECSTaskRevisionKey is the attribute Key conforming to the
+	// "aws.ecs.task.revision" semantic conventions. It represents the revision
+	// for this task definition.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '8', '26'
+	AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+	// ec2
+	AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+	// fargate
+	AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container
+// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+func AWSECSContainerARN(val string) attribute.KeyValue {
+	return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
+// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+func AWSECSClusterARN(val string) attribute.KeyValue {
+	return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
+// task
+// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+func AWSECSTaskARN(val string) attribute.KeyValue {
+	return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the task
+// definition family this task definition is a member of.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+	return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// this task definition.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+	return AWSECSTaskRevisionKey.String(val)
+}
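+
+// awsECSResourceAttributesExample is a hand-written usage sketch (not
+// generated from the semantic convention specification) of the aws.ecs.*
+// constructors above; the ARN and task values mirror the documented examples
+// and are placeholders.
+func awsECSResourceAttributesExample() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		CloudPlatformAWSECS,
+		AWSECSLaunchtypeFargate,
+		AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
+		AWSECSTaskFamily("opentelemetry-family"),
+		AWSECSTaskRevision("8"),
+	}
+}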
+
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+	// AWSEKSClusterARNKey is the attribute Key conforming to the
+	// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+	// EKS cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+	AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+	return AWSEKSClusterARNKey.String(val)
+}
+
+// Resources specific to Amazon Web Services.
+const (
+	// AWSLogGroupNamesKey is the attribute Key conforming to the
+	// "aws.log.group.names" semantic conventions. It represents the name(s) of
+	// the AWS log group(s) an application is writing to.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+	// Note: Multiple log groups must be supported for cases like
+	// multi-container applications, where a single application has sidecar
+	// containers, and each writes to its own log group.
+	AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+	// AWSLogGroupARNsKey is the attribute Key conforming to the
+	// "aws.log.group.arns" semantic conventions. It represents the Amazon
+	// Resource Name(s) (ARN) of the AWS log group(s).
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples:
+	// 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+	// Note: See the [log group ARN format
+	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+	AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+	// AWSLogStreamNamesKey is the attribute Key conforming to the
+	// "aws.log.stream.names" semantic conventions. It represents the name(s)
+	// of the AWS log stream(s) an application is writing to.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+	AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+
+	// AWSLogStreamARNsKey is the attribute Key conforming to the
+	// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+	// the AWS log stream(s).
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples:
+	// 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+	// Note: See the [log stream ARN format
+	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+	// One log group can contain several log streams, so these ARNs necessarily
+	// identify both a log group and a log stream.
+	AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+)
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+	return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+	return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+	return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+	return AWSLogStreamARNsKey.StringSlice(val)
+}
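+
+// awsLogAttributesExample is a hand-written usage sketch (not generated from
+// the semantic convention specification) of the variadic aws.log.*
+// constructors above, which accept one name or ARN per log group/stream; the
+// values are placeholders from the documented examples.
+func awsLogAttributesExample() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		AWSLogGroupNames("/aws/lambda/my-function", "opentelemetry-service"),
+		AWSLogStreamNames("logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"),
+	}
+}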
+
+// Resource used by Google Cloud Run.
+const (
+	// GCPCloudRunJobExecutionKey is the attribute Key conforming to the
+	// "gcp.cloud_run.job.execution" semantic conventions. It represents the
+	// name of the Cloud Run
+	// [execution](https://cloud.google.com/run/docs/managing/job-executions)
+	// being run for the Job, as set by the
+	// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+	// environment variable.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'job-name-xxxx', 'sample-job-mdw84'
+	GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
+
+	// GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
+	// "gcp.cloud_run.job.task_index" semantic conventions. It represents the
+	// index for a task within an execution as provided by the
+	// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+	// environment variable.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 0, 1
+	GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
+)
+
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name
+// of the Cloud Run
+// [execution](https://cloud.google.com/run/docs/managing/job-executions) being
+// run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+	return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the
+// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+	return GCPCloudRunJobTaskIndexKey.Int(val)
+}
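+
+// gcpCloudRunJobAttributesExample is a hand-written usage sketch (not
+// generated from the semantic convention specification); in a real Cloud Run
+// job the execution name and task index would come from the
+// CLOUD_RUN_EXECUTION and CLOUD_RUN_TASK_INDEX environment variables, and
+// are hardcoded here as placeholders.
+func gcpCloudRunJobAttributesExample() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		GCPCloudRunJobExecution("sample-job-mdw84"),
+		GCPCloudRunJobTaskIndex(0),
+	}
+}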
+
+// Resources used by Google Compute Engine (GCE).
+const (
+	// GCPGceInstanceNameKey is the attribute Key conforming to the
+	// "gcp.gce.instance.name" semantic conventions. It represents the instance
+	// name of a GCE instance. This is the value provided by `host.name`, the
+	// visible name of the instance in the Cloud Console UI, and the prefix for
+	// the default hostname of the instance as defined by the [default internal
+	// DNS
+	// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'instance-1', 'my-vm-name'
+	GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name")
+
+	// GCPGceInstanceHostnameKey is the attribute Key conforming to the
+	// "gcp.gce.instance.hostname" semantic conventions. It represents the
+	// hostname of a GCE instance. This is the full value of the default or
+	// [custom
+	// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'my-host1234.example.com',
+	// 'sample-vm.us-west1-b.c.my-project.internal'
+	GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
+)
+
+// GCPGceInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance
+// name of a GCE instance. This is the value provided by `host.name`, the
+// visible name of the instance in the Cloud Console UI, and the prefix for the
+// default hostname of the instance as defined by the [default internal DNS
+// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+func GCPGceInstanceName(val string) attribute.KeyValue {
+	return GCPGceInstanceNameKey.String(val)
+}
+
+// GCPGceInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or [custom
+// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+func GCPGceInstanceHostname(val string) attribute.KeyValue {
+	return GCPGceInstanceHostnameKey.String(val)
+}
+
+// Heroku dyno metadata
+const (
+	// HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+	// "heroku.release.creation_timestamp" semantic conventions. It represents
+	// the time and date the release was created
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '2022-10-23T18:00:42Z'
+	HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+
+	// HerokuReleaseCommitKey is the attribute Key conforming to the
+	// "heroku.release.commit" semantic conventions. It represents the commit
+	// hash for the current release
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+	HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+	// HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+	// semantic conventions. It represents the unique identifier for the
+	// application
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+	HerokuAppIDKey = attribute.Key("heroku.app.id")
+)
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+	return HerokuReleaseCreationTimestampKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+	return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application
+func HerokuAppID(val string) attribute.KeyValue {
+	return HerokuAppIDKey.String(val)
+}
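+
+// herokuAttributesExample is a hand-written usage sketch (not generated from
+// the semantic convention specification) of the heroku.* constructors above;
+// all values are placeholders taken from the documented examples.
+func herokuAttributesExample() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		HerokuAppID("2daa2797-e42b-4624-9322-ec3f968df4da"),
+		HerokuReleaseCommit("e6134959463efd8966b20e75b913cafe3f5ec"),
+		HerokuReleaseCreationTimestamp("2022-10-23T18:00:42Z"),
+	}
+}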
+
+// A container instance.
+const (
+	// ContainerNameKey is the attribute Key conforming to the "container.name"
+	// semantic conventions. It represents the container name used by container
+	// runtime.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry-autoconf'
+	ContainerNameKey = attribute.Key("container.name")
+
+	// ContainerIDKey is the attribute Key conforming to the "container.id"
+	// semantic conventions. It represents the container ID. Usually a UUID, as
+	// for example used to [identify Docker
+	// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+	// The UUID might be abbreviated.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'a3bf90e006b2'
+	ContainerIDKey = attribute.Key("container.id")
+
+	// ContainerRuntimeKey is the attribute Key conforming to the
+	// "container.runtime" semantic conventions. It represents the container
+	// runtime managing this container.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'docker', 'containerd', 'rkt'
+	ContainerRuntimeKey = attribute.Key("container.runtime")
+
+	// ContainerImageNameKey is the attribute Key conforming to the
+	// "container.image.name" semantic conventions. It represents the name of
+	// the image the container was built on.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'gcr.io/opentelemetry/operator'
+	ContainerImageNameKey = attribute.Key("container.image.name")
+
+	// ContainerImageTagKey is the attribute Key conforming to the
+	// "container.image.tag" semantic conventions. It represents the container
+	// image tag.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '0.1'
+	ContainerImageTagKey = attribute.Key("container.image.tag")
+
+	// ContainerImageIDKey is the attribute Key conforming to the
+	// "container.image.id" semantic conventions. It represents the runtime
+	// specific image identifier. Usually a hash algorithm followed by a UUID.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples:
+	// 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+	// Note: Docker defines a sha256 of the image id; `container.image.id`
+	// corresponds to the `Image` field from the Docker container inspect
+	// [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+	// endpoint.
+	// K8S defines a link to the container registry repository with digest
+	// `"imageID": "registry.azurecr.io
+	// /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+	// OCI defines a digest of manifest.
+	ContainerImageIDKey = attribute.Key("container.image.id")
+
+	// ContainerCommandKey is the attribute Key conforming to the
+	// "container.command" semantic conventions. It represents the command used
+	// to run the container (i.e. the command name).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'otelcontribcol'
+	// Note: If using embedded credentials or sensitive data, it is recommended
+	// to remove them to prevent potential leakage.
+	ContainerCommandKey = attribute.Key("container.command")
+
+	// ContainerCommandLineKey is the attribute Key conforming to the
+	// "container.command_line" semantic conventions. It represents the full
+	// command run by the container as a single string representing the full
+	// command. [2]
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'otelcontribcol --config config.yaml'
+	ContainerCommandLineKey = attribute.Key("container.command_line")
+
+	// ContainerCommandArgsKey is the attribute Key conforming to the
+	// "container.command_args" semantic conventions. It represents the all the
+	// command arguments (including the command/executable itself) run by the
+	// container. [2]
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'otelcontribcol, --config, config.yaml'
+	ContainerCommandArgsKey = attribute.Key("container.command_args")
+)
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+	return ContainerNameKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+	return ContainerIDKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+	return ContainerRuntimeKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+	return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageTag returns an attribute KeyValue conforming to the
+// "container.image.tag" semantic conventions. It represents the container
+// image tag.
+func ContainerImageTag(val string) attribute.KeyValue {
+	return ContainerImageTagKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime
+// specific image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+	return ContainerImageIDKey.String(val)
+}
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+	return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string.
+func ContainerCommandLine(val string) attribute.KeyValue {
+	return ContainerCommandLineKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) run by the
+// container. [2]
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+	return ContainerCommandArgsKey.StringSlice(val)
+}
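+
+// containerResourceAttributesExample is a hand-written usage sketch (not
+// generated from the semantic convention specification) of the container.*
+// constructors above; note that ContainerCommandArgs is variadic while
+// ContainerCommandLine takes the full command as a single string. All values
+// are placeholders from the documented examples.
+func containerResourceAttributesExample() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		ContainerName("opentelemetry-autoconf"),
+		ContainerID("a3bf90e006b2"),
+		ContainerRuntime("containerd"),
+		ContainerImageName("gcr.io/opentelemetry/operator"),
+		ContainerImageTag("0.1"),
+		ContainerCommand("otelcontribcol"),
+		ContainerCommandLine("otelcontribcol --config config.yaml"),
+		ContainerCommandArgs("otelcontribcol", "--config", "config.yaml"),
+	}
+}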
+
+// The software deployment.
+const (
+	// DeploymentEnvironmentKey is the attribute Key conforming to the
+	// "deployment.environment" semantic conventions. It represents the name of
+	// the [deployment
+	// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+	// deployment tier).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'staging', 'production'
+	DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment
+// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+// deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+	return DeploymentEnvironmentKey.String(val)
+}
+
+// The device on which the process represented by this resource is running.
+const (
+	// DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+	// conventions. It represents a unique identifier representing the device
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+	// Note: The device identifier MUST only be defined using the values
+	// outlined below. This value is not an advertising identifier and MUST NOT
+	// be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+	// to the [vendor
+	// identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+	// On Android (Java or Kotlin), this value MUST be equal to the Firebase
+	// Installation ID or a globally unique UUID which is persisted across
+	// sessions in your application. More information can be found
+	// [here](https://developer.android.com/training/articles/user-data-ids) on
+	// best practices and exact implementation details. Caution should be taken
+	// when storing personal data or anything which can identify a user. GDPR
+	// and data protection laws may apply, ensure you do your own due
+	// diligence.
+	DeviceIDKey = attribute.Key("device.id")
+
+	// DeviceModelIdentifierKey is the attribute Key conforming to the
+	// "device.model.identifier" semantic conventions. It represents the model
+	// identifier for the device
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'iPhone3,4', 'SM-G920F'
+	// Note: It's recommended this value represents a machine readable version
+	// of the model identifier rather than the market or consumer-friendly name
+	// of the device.
+	DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+	// DeviceModelNameKey is the attribute Key conforming to the
+	// "device.model.name" semantic conventions. It represents the marketing
+	// name for the device model
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+	// Note: It's recommended this value represents a human readable version of
+	// the device model rather than a machine readable alternative.
+	DeviceModelNameKey = attribute.Key("device.model.name")
+
+	// DeviceManufacturerKey is the attribute Key conforming to the
+	// "device.manufacturer" semantic conventions. It represents the name of
+	// the device manufacturer
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Apple', 'Samsung'
+	// Note: The Android OS provides this field via
+	// [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+	// iOS apps SHOULD hardcode the value `Apple`.
+	DeviceManufacturerKey = attribute.Key("device.manufacturer")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device
+func DeviceID(val string) attribute.KeyValue {
+	return DeviceIDKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+	return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model
+func DeviceModelName(val string) attribute.KeyValue {
+	return DeviceModelNameKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+	return DeviceManufacturerKey.String(val)
+}
+
+// A serverless instance.
+const (
+	// FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+	// conventions. It represents the name of the single function that this
+	// runtime instance executes.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+	// Note: This is the name of the function as configured/deployed on the
+	// FaaS
+	// platform and is usually different from the name of the callback
+	// function (which may be stored in the
+	// [`code.namespace`/`code.function`](/docs/general/general-attributes.md#source-code-attributes)
+	// span attributes).
+	//
+	// For some cloud providers, the above definition is ambiguous. The
+	// following
+	// definition of function name MUST be used for this attribute
+	// (and consequently the span name) for the listed cloud
+	// providers/products:
+	//
+	// * **Azure:**  The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+	//   followed by a forward slash followed by the function name (this form
+	//   can also be seen in the resource JSON for the function).
+	//   This means that a span attribute MUST be used, as an Azure function
+	//   app can host multiple functions that would usually share
+	//   a TracerProvider (see also the `cloud.resource_id` attribute).
+	FaaSNameKey = attribute.Key("faas.name")
+
+	// FaaSVersionKey is the attribute Key conforming to the "faas.version"
+	// semantic conventions. It represents the immutable version of the
+	// function being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '26', 'pinkfroid-00002'
+	// Note: Depending on the cloud provider and platform, use:
+	//
+	// * **AWS Lambda:** The [function
+	// version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+	//   (an integer represented as a decimal string).
+	// * **Google Cloud Run (Services):** The
+	// [revision](https://cloud.google.com/run/docs/managing/revisions)
+	//   (i.e., the function name plus the revision suffix).
+	// * **Google Cloud Functions:** The value of the
+	//   [`K_REVISION` environment
+	// variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+	// * **Azure Functions:** Not applicable. Do not set this attribute.
+	FaaSVersionKey = attribute.Key("faas.version")
+
+	// FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+	// semantic conventions. It represents the execution environment ID as a
+	// string, which will potentially be reused for other invocations of the
+	// same function/function version.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+	// Note: * **AWS Lambda:** Use the (full) log stream name.
+	FaaSInstanceKey = attribute.Key("faas.instance")
+
+	// FaaSMaxMemoryKey is the attribute Key conforming to the
+	// "faas.max_memory" semantic conventions. It represents the amount of
+	// memory available to the serverless function converted to Bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 134217728
+	// Note: It's recommended to set this attribute since e.g. too little
+	// memory can easily stop a Java AWS Lambda function from working
+	// correctly. On AWS Lambda, the environment variable
+	// `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+	// be multiplied by 1,048,576).
+	FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+)
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+	return FaaSNameKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+	return FaaSVersionKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string, which will potentially be reused for other
+// invocations of the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+	return FaaSInstanceKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+	return FaaSMaxMemoryKey.Int(val)
+}
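+
+// faasResourceAttributesExample is a hand-written usage sketch (not
+// generated from the semantic convention specification); per the note above,
+// a memory size reported in MiB (e.g. AWS Lambda's
+// AWS_LAMBDA_FUNCTION_MEMORY_SIZE) is multiplied by 1,048,576 to obtain
+// bytes. The values are placeholders.
+func faasResourceAttributesExample() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		FaaSName("my-function"),
+		FaaSVersion("26"),
+		FaaSMaxMemory(128 * 1048576), // 128 MiB converted to bytes
+	}
+}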
+
+// A host is defined as a computing instance. For example, physical servers,
+// virtual machines, switches, or disk arrays.
+const (
+	// HostIDKey is the attribute Key conforming to the "host.id" semantic
+	// conventions. It represents the unique host ID. For Cloud, this must be
+	// the instance_id assigned by the cloud provider. For non-containerized
+	// systems, this should be the `machine-id`. See the semantic conventions
+	// specification for the sources to use to determine the `machine-id`
+	// based on operating system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+	HostIDKey = attribute.Key("host.id")
+
+	// HostNameKey is the attribute Key conforming to the "host.name" semantic
+	// conventions. It represents the name of the host. On Unix systems, it may
+	// contain what the hostname command returns, or the fully qualified
+	// hostname, or another name specified by the user.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry-test'
+	HostNameKey = attribute.Key("host.name")
+
+	// HostTypeKey is the attribute Key conforming to the "host.type" semantic
+	// conventions. It represents the type of host. For Cloud, this must be the
+	// machine type.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'n1-standard-1'
+	HostTypeKey = attribute.Key("host.type")
+
+	// HostArchKey is the attribute Key conforming to the "host.arch" semantic
+	// conventions. It represents the CPU architecture the host system is
+	// running on.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	HostArchKey = attribute.Key("host.arch")
+
+	// HostImageNameKey is the attribute Key conforming to the
+	// "host.image.name" semantic conventions. It represents the name of the VM
+	// image or OS install the host was instantiated from.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+	HostImageNameKey = attribute.Key("host.image.name")
+
+	// HostImageIDKey is the attribute Key conforming to the "host.image.id"
+	// semantic conventions. It represents the VM image ID or host OS image ID.
+	// For Cloud, this value is from the provider.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'ami-07b06b442921831e5'
+	HostImageIDKey = attribute.Key("host.image.id")
+
+	// HostImageVersionKey is the attribute Key conforming to the
+	// "host.image.version" semantic conventions. It represents the version
+	// string of the VM image or host OS as defined in [Version
+	// Attributes](README.md#version-attributes).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '0.1'
+	HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+	// AMD64
+	HostArchAMD64 = HostArchKey.String("amd64")
+	// ARM32
+	HostArchARM32 = HostArchKey.String("arm32")
+	// ARM64
+	HostArchARM64 = HostArchKey.String("arm64")
+	// Itanium
+	HostArchIA64 = HostArchKey.String("ia64")
+	// 32-bit PowerPC
+	HostArchPPC32 = HostArchKey.String("ppc32")
+	// 64-bit PowerPC
+	HostArchPPC64 = HostArchKey.String("ppc64")
+	// IBM z/Architecture
+	HostArchS390x = HostArchKey.String("s390x")
+	// 32-bit x86
+	HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the semantic conventions specification
+// for the sources to use to determine the `machine-id` based on operating
+// system.
+func HostID(val string) attribute.KeyValue {
+	return HostIDKey.String(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+	return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+	return HostTypeKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+	return HostImageNameKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the vM image ID or host
+// OS image ID. For Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+	return HostImageIDKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image or host OS as defined in [Version
+// Attributes](README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+	return HostImageVersionKey.String(val)
+}
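+
+// hostResourceAttributesExample is a hand-written usage sketch (not
+// generated from the semantic convention specification) combining the host.*
+// constructors and the host.arch enum values above; all values are
+// placeholders from the documented examples.
+func hostResourceAttributesExample() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		HostID("fdbf79e8af94cb7f9e8df36789187052"),
+		HostName("opentelemetry-test"),
+		HostType("n1-standard-1"),
+		HostArchAMD64,
+		HostImageName("infra-ami-eks-worker-node-7d4ec78312"),
+		HostImageID("ami-07b06b442921831e5"),
+		HostImageVersion("0.1"),
+	}
+}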
+
+// A Kubernetes Cluster.
+const (
+	// K8SClusterNameKey is the attribute Key conforming to the
+	// "k8s.cluster.name" semantic conventions. It represents the name of the
+	// cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry-cluster'
+	K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+	// K8SClusterUIDKey is the attribute Key conforming to the
+	// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
+	// the cluster, set to the UID of the `kube-system` namespace.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
+	// Note: K8S does not have support for obtaining a cluster ID. If this is
+	// ever
+	// added, we will recommend collecting the `k8s.cluster.uid` through the
+	// official APIs. In the meantime, we are able to use the `uid` of the
+	// `kube-system` namespace as a proxy for cluster ID. Read on for the
+	// rationale.
+	//
+	// Every object created in a K8S cluster is assigned a distinct UID. The
+	// `kube-system` namespace is used by Kubernetes itself and will exist
+	// for the lifetime of the cluster. Using the `uid` of the `kube-system`
+	// namespace is a reasonable proxy for the K8S ClusterID as it will only
+	// change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+	// UUIDs as standardized by
+	// [ISO/IEC 9834-8 and ITU-T
+	// X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html).
+	// Which states:
+	//
+	// > If generated according to one of the mechanisms defined in Rec.
+	//   ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+	//   different from all other UUIDs generated before 3603 A.D., or is
+	//   extremely likely to be different (depending on the mechanism chosen).
+	//
+	// Therefore, UIDs between clusters should be extremely unlikely to
+	// conflict.
+	K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+	return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+	return K8SClusterUIDKey.String(val)
+}
+
+// A Kubernetes Node object.
+const (
+	// K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+	// semantic conventions. It represents the name of the Node.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'node-1'
+	K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+	// K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+	// semantic conventions. It represents the UID of the Node.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+	K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+)
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+	return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+	return K8SNodeUIDKey.String(val)
+}
+
+// A Kubernetes Namespace.
+const (
+	// K8SNamespaceNameKey is the attribute Key conforming to the
+	// "k8s.namespace.name" semantic conventions. It represents the name of the
+	// namespace that the pod is running in.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'default'
+	K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+)
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+	return K8SNamespaceNameKey.String(val)
+}
+
+// A Kubernetes Pod object.
+const (
+	// K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+	// semantic conventions. It represents the UID of the Pod.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+	// K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+	// semantic conventions. It represents the name of the Pod.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry-pod-autoconf'
+	K8SPodNameKey = attribute.Key("k8s.pod.name")
+)
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+	return K8SPodUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+	return K8SPodNameKey.String(val)
+}
+
+// A container in a
+// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
+const (
+	// K8SContainerNameKey is the attribute Key conforming to the
+	// "k8s.container.name" semantic conventions. It represents the name of the
+	// Container from the Pod specification; it must be unique within a Pod.
+	// The container runtime usually uses a different, globally unique name
+	// (`container.name`).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'redis'
+	K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+	// K8SContainerRestartCountKey is the attribute Key conforming to the
+	// "k8s.container.restart_count" semantic conventions. It represents the
+	// number of times the container was restarted. This attribute can be used
+	// to identify a particular container (running or stopped) within a
+	// container spec.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 0, 2
+	K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+)
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+	return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+	return K8SContainerRestartCountKey.Int(val)
+}
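+
+// k8sWorkloadAttributesExample is a hand-written usage sketch (not generated
+// from the semantic convention specification) combining the k8s.*
+// constructors defined above into a single attribute set describing where a
+// container runs; all values are placeholders from the documented examples.
+func k8sWorkloadAttributesExample() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		K8SClusterName("opentelemetry-cluster"),
+		K8SNodeName("node-1"),
+		K8SNamespaceName("default"),
+		K8SPodName("opentelemetry-pod-autoconf"),
+		K8SContainerName("redis"),
+		K8SContainerRestartCount(0),
+	}
+}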
+
+// A Kubernetes ReplicaSet object.
+const (
+	// K8SReplicaSetUIDKey is the attribute Key conforming to the
+	// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+	// ReplicaSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+	// K8SReplicaSetNameKey is the attribute Key conforming to the
+	// "k8s.replicaset.name" semantic conventions. It represents the name of
+	// the ReplicaSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+)
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+	return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+	return K8SReplicaSetNameKey.String(val)
+}
+
+// A Kubernetes Deployment object.
+const (
+	// K8SDeploymentUIDKey is the attribute Key conforming to the
+	// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+	// Deployment.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+	// K8SDeploymentNameKey is the attribute Key conforming to the
+	// "k8s.deployment.name" semantic conventions. It represents the name of
+	// the Deployment.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+)
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+	return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+	return K8SDeploymentNameKey.String(val)
+}
+
+// A Kubernetes StatefulSet object.
+const (
+	// K8SStatefulSetUIDKey is the attribute Key conforming to the
+	// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+	// StatefulSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+
+	// K8SStatefulSetNameKey is the attribute Key conforming to the
+	// "k8s.statefulset.name" semantic conventions. It represents the name of
+	// the StatefulSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+)
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+	return K8SStatefulSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+	return K8SStatefulSetNameKey.String(val)
+}
+
+// A Kubernetes DaemonSet object.
+const (
+	// K8SDaemonSetUIDKey is the attribute Key conforming to the
+	// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+	// DaemonSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+	// K8SDaemonSetNameKey is the attribute Key conforming to the
+	// "k8s.daemonset.name" semantic conventions. It represents the name of the
+	// DaemonSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+)
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+	return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+	return K8SDaemonSetNameKey.String(val)
+}
+
+// A Kubernetes Job object.
+const (
+	// K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+	// semantic conventions. It represents the UID of the Job.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+	// K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+	// semantic conventions. It represents the name of the Job.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SJobNameKey = attribute.Key("k8s.job.name")
+)
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+	return K8SJobUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+	return K8SJobNameKey.String(val)
+}
+
+// A Kubernetes CronJob object.
+const (
+	// K8SCronJobUIDKey is the attribute Key conforming to the
+	// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+	// CronJob.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+	// K8SCronJobNameKey is the attribute Key conforming to the
+	// "k8s.cronjob.name" semantic conventions. It represents the name of the
+	// CronJob.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+)
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+	return K8SCronJobUIDKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+	return K8SCronJobNameKey.String(val)
+}
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+	// OSTypeKey is the attribute Key conforming to the "os.type" semantic
+	// conventions. It represents the operating system type.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	OSTypeKey = attribute.Key("os.type")
+
+	// OSDescriptionKey is the attribute Key conforming to the "os.description"
+	// semantic conventions. It represents the human readable (not intended to
+	// be parsed) OS version information, e.g. as reported by the `ver` or
+	// `lsb_release -a` commands.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+	// LTS'
+	OSDescriptionKey = attribute.Key("os.description")
+
+	// OSNameKey is the attribute Key conforming to the "os.name" semantic
+	// conventions. It represents the human readable operating system name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'iOS', 'Android', 'Ubuntu'
+	OSNameKey = attribute.Key("os.name")
+
+	// OSVersionKey is the attribute Key conforming to the "os.version"
+	// semantic conventions. It represents the version string of the operating
+	// system as defined in [Version
+	// Attributes](/docs/resource/README.md#version-attributes).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '14.2.1', '18.04.1'
+	OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+	// Microsoft Windows
+	OSTypeWindows = OSTypeKey.String("windows")
+	// Linux
+	OSTypeLinux = OSTypeKey.String("linux")
+	// Apple Darwin
+	OSTypeDarwin = OSTypeKey.String("darwin")
+	// FreeBSD
+	OSTypeFreeBSD = OSTypeKey.String("freebsd")
+	// NetBSD
+	OSTypeNetBSD = OSTypeKey.String("netbsd")
+	// OpenBSD
+	OSTypeOpenBSD = OSTypeKey.String("openbsd")
+	// DragonFly BSD
+	OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+	// HP-UX (Hewlett Packard Unix)
+	OSTypeHPUX = OSTypeKey.String("hpux")
+	// AIX (Advanced Interactive eXecutive)
+	OSTypeAIX = OSTypeKey.String("aix")
+	// SunOS, Oracle Solaris
+	OSTypeSolaris = OSTypeKey.String("solaris")
+	// IBM z/OS
+	OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, e.g. as reported by the
+// `ver` or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+	return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+	return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+	return OSVersionKey.String(val)
+}
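+
+// Example (illustrative only, not part of the upstream generated file):
+// `os.type` is an enum, so one of the pre-built variants above is used
+// directly, while the string-valued keys take their constructors; the
+// values are placeholders:
+//
+//	attrs := []attribute.KeyValue{
+//		OSTypeLinux,
+//		OSName("Ubuntu"),
+//		OSVersion("18.04.1"),
+//	}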
+
+// An operating system process.
+const (
+	// ProcessPIDKey is the attribute Key conforming to the "process.pid"
+	// semantic conventions. It represents the process identifier (PID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 1234
+	ProcessPIDKey = attribute.Key("process.pid")
+
+	// ProcessParentPIDKey is the attribute Key conforming to the
+	// "process.parent_pid" semantic conventions. It represents the parent
+	// Process identifier (PID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 111
+	ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+	// ProcessExecutableNameKey is the attribute Key conforming to the
+	// "process.executable.name" semantic conventions. It represents the name
+	// of the process executable. On Linux based systems, can be set to the
+	// `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+	// of `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: stable
+	// Examples: 'otelcol'
+	ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+	// ProcessExecutablePathKey is the attribute Key conforming to the
+	// "process.executable.path" semantic conventions. It represents the full
+	// path to the process executable. On Linux based systems, can be set to
+	// the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+	// `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: stable
+	// Examples: '/usr/bin/cmd/otelcol'
+	ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+	// ProcessCommandKey is the attribute Key conforming to the
+	// "process.command" semantic conventions. It represents the command used
+	// to launch the process (i.e. the command name). On Linux based systems,
+	// can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+	// be set to the first parameter extracted from `GetCommandLineW`.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: stable
+	// Examples: 'cmd/otelcol'
+	ProcessCommandKey = attribute.Key("process.command")
+
+	// ProcessCommandLineKey is the attribute Key conforming to the
+	// "process.command_line" semantic conventions. It represents the full
+	// command used to launch the process as a single string representing the
+	// full command. On Windows, can be set to the result of `GetCommandLineW`.
+	// Do not set this if you have to assemble it just for monitoring; use
+	// `process.command_args` instead.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: stable
+	// Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
+	ProcessCommandLineKey = attribute.Key("process.command_line")
+
+	// ProcessCommandArgsKey is the attribute Key conforming to the
+	// "process.command_args" semantic conventions. It represents the all the
+	// command arguments (including the command/executable itself) as received
+	// by the process. On Linux-based systems (and some other Unixoid systems
+	// supporting procfs), can be set according to the list of null-delimited
+	// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+	// this would be the full argv vector passed to `main`.
+	//
+	// Type: string[]
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: stable
+	// Examples: 'cmd/otelcol', '--config=config.yaml'
+	ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+	// ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+	// semantic conventions. It represents the username of the user that owns
+	// the process.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'root'
+	ProcessOwnerKey = attribute.Key("process.owner")
+)
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+	return ProcessPIDKey.Int(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PID).
+func ProcessParentPID(val int) attribute.KeyValue {
+	return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+	return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+	return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+	return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+	return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+	return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+	return ProcessOwnerKey.String(val)
+}
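+
+// Example (illustrative only, not part of the upstream generated file): a
+// minimal sketch describing the current process; os.Getpid is the standard
+// library call, and ProcessCommandArgs is variadic to match the string[]
+// type of `process.command_args`:
+//
+//	attrs := []attribute.KeyValue{
+//		ProcessPID(os.Getpid()),
+//		ProcessExecutableName("otelcol"),
+//		ProcessCommandArgs("cmd/otelcol", "--config=config.yaml"),
+//	}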
+
+// The single (language) runtime instance which is monitored.
+const (
+	// ProcessRuntimeNameKey is the attribute Key conforming to the
+	// "process.runtime.name" semantic conventions. It represents the name of
+	// the runtime of this process. For compiled native binaries, this SHOULD
+	// be the name of the compiler.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'OpenJDK Runtime Environment'
+	ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+	// ProcessRuntimeVersionKey is the attribute Key conforming to the
+	// "process.runtime.version" semantic conventions. It represents the
+	// version of the runtime of this process, as returned by the runtime
+	// without modification.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '14.0.2'
+	ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+	// ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+	// "process.runtime.description" semantic conventions. It represents an
+	// additional description about the runtime of the process, for example a
+	// specific vendor customization of the runtime environment.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+	ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+)
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+	return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+	return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+	return ProcessRuntimeDescriptionKey.String(val)
+}
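+
+// Example (illustrative only, not part of the upstream generated file): for
+// a Go binary the runtime attributes can be filled from the standard
+// library's runtime package:
+//
+//	attrs := []attribute.KeyValue{
+//		ProcessRuntimeName("go"),
+//		ProcessRuntimeVersion(runtime.Version()),
+//	}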
+
+// A service instance.
+const (
+	// ServiceNameKey is the attribute Key conforming to the "service.name"
+	// semantic conventions. It represents the logical name of the service.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'shoppingcart'
+	// Note: MUST be the same for all instances of horizontally scaled
+	// services. If the value was not specified, SDKs MUST fall back to
+	// `unknown_service:` concatenated with
+	// [`process.executable.name`](process.md#process), e.g.
+	// `unknown_service:bash`. If `process.executable.name` is not available,
+	// the value MUST be set to `unknown_service`.
+	ServiceNameKey = attribute.Key("service.name")
+
+	// ServiceVersionKey is the attribute Key conforming to the
+	// "service.version" semantic conventions. It represents the version string
+	// of the service API or implementation. The format is not defined by these
+	// conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '2.0.0', 'a01dbef8a'
+	ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+	return ServiceNameKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation. The format is not defined by these
+// conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+	return ServiceVersionKey.String(val)
+}
+
+// A service instance.
+const (
+	// ServiceNamespaceKey is the attribute Key conforming to the
+	// "service.namespace" semantic conventions. It represents a namespace for
+	// `service.name`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Shop'
+	// Note: A string value having a meaning that helps to distinguish a group
+	// of services, for example the team name that owns a group of services.
+	// `service.name` is expected to be unique within the same namespace. If
+	// `service.namespace` is not specified in the Resource then `service.name`
+	// is expected to be unique for all services that have no explicit
+	// namespace defined (so the empty/unspecified namespace is simply one more
+	// valid namespace). Zero-length namespace string is assumed equal to
+	// unspecified namespace.
+	ServiceNamespaceKey = attribute.Key("service.namespace")
+
+	// ServiceInstanceIDKey is the attribute Key conforming to the
+	// "service.instance.id" semantic conventions. It represents the string ID
+	// of the service instance.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'my-k8s-pod-deployment-1',
+	// '627cc493-f310-47de-96bd-71410b7dec09'
+	// Note: MUST be unique for each instance of the same
+	// `service.namespace,service.name` pair (in other words
+	// `service.namespace,service.name,service.instance.id` triplet MUST be
+	// globally unique). The ID helps to distinguish instances of the same
+	// service that exist at the same time (e.g. instances of a horizontally
+	// scaled service). It is preferable for the ID to be persistent and stay
+	// the same for the lifetime of the service instance, however it is
+	// acceptable that the ID is ephemeral and changes during important
+	// lifetime events for the service (e.g. service restarts). If the service
+	// has no inherent unique ID that can be used as the value of this
+	// attribute it is recommended to generate a random Version 1 or Version 4
+	// RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+	// Version 5, see RFC 4122 for more recommendations).
+	ServiceInstanceIDKey = attribute.Key("service.instance.id")
+)
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+	return ServiceNamespaceKey.String(val)
+}
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+	return ServiceInstanceIDKey.String(val)
+}
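+
+// Example (illustrative only, not part of the upstream generated file):
+// together with `service.name`, the namespace and instance ID form the
+// globally unique identity described in the notes above; the values are
+// placeholders:
+//
+//	attrs := []attribute.KeyValue{
+//		ServiceNamespace("Shop"),
+//		ServiceName("shoppingcart"),
+//		ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
+//	}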
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+	// TelemetrySDKNameKey is the attribute Key conforming to the
+	// "telemetry.sdk.name" semantic conventions. It represents the name of the
+	// telemetry SDK as defined above.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	// Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
+	// to `opentelemetry`.
+	// If another SDK, like a fork or a vendor-provided implementation, is
+	// used, this SDK MUST set the
+	// `telemetry.sdk.name` attribute to the fully-qualified class or module
+	// name of this SDK's main entry point
+	// or another suitable identifier depending on the language.
+	// The identifier `opentelemetry` is reserved and MUST NOT be used in this
+	// case.
+	// All custom identifiers SHOULD be stable across different versions of an
+	// implementation.
+	TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+	// TelemetrySDKLanguageKey is the attribute Key conforming to the
+	// "telemetry.sdk.language" semantic conventions. It represents the
+	// language of the telemetry SDK.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+	// TelemetrySDKVersionKey is the attribute Key conforming to the
+	// "telemetry.sdk.version" semantic conventions. It represents the version
+	// string of the telemetry SDK.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: '1.2.3'
+	TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+)
+
+var (
+	// cpp
+	TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+	// dotnet
+	TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+	// erlang
+	TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+	// go
+	TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+	// java
+	TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+	// nodejs
+	TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+	// php
+	TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+	// python
+	TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+	// ruby
+	TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+	// rust
+	TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
+	// swift
+	TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+	// webjs
+	TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+)
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+	return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+	return TelemetrySDKVersionKey.String(val)
+}
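+
+// Example (illustrative only, not part of the upstream generated file): an
+// SDK identifying itself; `telemetry.sdk.language` is an enum, so the
+// pre-built variant is used directly:
+//
+//	attrs := []attribute.KeyValue{
+//		TelemetrySDKName("opentelemetry"),
+//		TelemetrySDKLanguageGo,
+//		TelemetrySDKVersion("1.2.3"),
+//	}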
+
+// The auto instrumentation agent used to capture telemetry, if any.
+const (
+	// TelemetryAutoVersionKey is the attribute Key conforming to the
+	// "telemetry.auto.version" semantic conventions. It represents the version
+	// string of the auto instrumentation agent, if used.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1.2.3'
+	TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
+)
+
+// TelemetryAutoVersion returns an attribute KeyValue conforming to the
+// "telemetry.auto.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent, if used.
+func TelemetryAutoVersion(val string) attribute.KeyValue {
+	return TelemetryAutoVersionKey.String(val)
+}
+
+// Resource describing the packaged software running the application code. Web
+// engines are typically executed using process.runtime.
+const (
+	// WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+	// semantic conventions. It represents the name of the web engine.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'WildFly'
+	WebEngineNameKey = attribute.Key("webengine.name")
+
+	// WebEngineVersionKey is the attribute Key conforming to the
+	// "webengine.version" semantic conventions. It represents the version of
+	// the web engine.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '21.0.0'
+	WebEngineVersionKey = attribute.Key("webengine.version")
+
+	// WebEngineDescriptionKey is the attribute Key conforming to the
+	// "webengine.description" semantic conventions. It represents the
+	// additional description of the web engine (e.g. detailed version and
+	// edition information).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+	// 2.2.2.Final'
+	WebEngineDescriptionKey = attribute.Key("webengine.description")
+)
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+	return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+	return WebEngineVersionKey.String(val)
+}
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+	return WebEngineDescriptionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+	// OTelScopeNameKey is the attribute Key conforming to the
+	// "otel.scope.name" semantic conventions. It represents the name of the
+	// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'io.opentelemetry.contrib.mongodb'
+	OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+	// OTelScopeVersionKey is the attribute Key conforming to the
+	// "otel.scope.version" semantic conventions. It represents the version of
+	// the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1.0.0'
+	OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+	return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+	return OTelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+	// OTelLibraryNameKey is the attribute Key conforming to the
+	// "otel.library.name" semantic conventions. It represents the deprecated,
+	// use the `otel.scope.name` attribute.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'io.opentelemetry.contrib.mongodb'
+	OTelLibraryNameKey = attribute.Key("otel.library.name")
+
+	// OTelLibraryVersionKey is the attribute Key conforming to the
+	// "otel.library.version" semantic conventions. It represents the
+	// deprecated, use the `otel.scope.version` attribute.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '1.0.0'
+	OTelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OTelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions. It represents the deprecated, use
+// the `otel.scope.name` attribute.
+func OTelLibraryName(val string) attribute.KeyValue {
+	return OTelLibraryNameKey.String(val)
+}
+
+// OTelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions. It represents the deprecated,
+// use the `otel.scope.version` attribute.
+func OTelLibraryVersion(val string) attribute.KeyValue {
+	return OTelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go
new file mode 100644
index 0000000000000000000000000000000000000000..66ffd5989f341a358d3f23bffba36a5c0b1473fd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.21.0"
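+
+// Example (illustrative only, not part of the upstream generated file):
+// SchemaURL is typically passed alongside attributes from this package when
+// building an SDK resource; resource.NewWithAttributes is assumed from
+// go.opentelemetry.io/otel/sdk/resource:
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		ServiceName("shoppingcart"),
+//		ServiceVersion("2.0.0"),
+//	)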
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go
new file mode 100644
index 0000000000000000000000000000000000000000..b5a91450d420787a0a9693b9909adc6ce35ac348
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go
@@ -0,0 +1,2495 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+	// ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+	// semantic conventions. It represents the type of the exception (its
+	// fully-qualified class name, if applicable). The dynamic type of the
+	// exception should be preferred over the static type in languages that
+	// support it.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'java.net.ConnectException', 'OSError'
+	ExceptionTypeKey = attribute.Key("exception.type")
+
+	// ExceptionMessageKey is the attribute Key conforming to the
+	// "exception.message" semantic conventions. It represents the exception
+	// message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Division by zero', "Can't convert 'int' object to str
+	// implicitly"
+	ExceptionMessageKey = attribute.Key("exception.message")
+
+	// ExceptionStacktraceKey is the attribute Key conforming to the
+	// "exception.stacktrace" semantic conventions. It represents a stacktrace
+	// as a string in the natural representation for the language runtime. The
+	// representation is to be determined and documented by each language SIG.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+	// exception\\n at '
+	//  'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+	//  'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+	//  'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+	ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+)
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+	return ExceptionTypeKey.String(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+	return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+	return ExceptionStacktraceKey.String(val)
+}
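+
+// Example (illustrative only, not part of the upstream generated file):
+// exception attributes are conventionally attached to a span event named
+// "exception" rather than to the span itself; span is an assumed trace.Span
+// and trace.WithAttributes comes from go.opentelemetry.io/otel/trace:
+//
+//	span.AddEvent("exception", trace.WithAttributes(
+//		ExceptionType("OSError"),
+//		ExceptionMessage("Division by zero"),
+//	))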
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+	// AWSLambdaInvokedARNKey is the attribute Key conforming to the
+	// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+	// invoked ARN as provided on the `Context` passed to the function
+	// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+	// `/runtime/invocation/next` response, if applicable).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+	// Note: This may be different from `cloud.resource_id` if an alias is
+	// involved.
+	AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` response, if applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+	return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+	// CloudeventsEventIDKey is the attribute Key conforming to the
+	// "cloudevents.event_id" semantic conventions. It represents the
+	// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+	// that uniquely identifies the event.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+	CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+	// CloudeventsEventSourceKey is the attribute Key conforming to the
+	// "cloudevents.event_source" semantic conventions. It represents the
+	// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+	// that identifies the context in which an event happened.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'https://github.com/cloudevents',
+	// '/cloudevents/spec/pull/123', 'my-service'
+	CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+	// CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+	// "cloudevents.event_spec_version" semantic conventions. It represents the
+	// [version of the CloudEvents
+	// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+	// which the event uses.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1.0'
+	CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+	// CloudeventsEventTypeKey is the attribute Key conforming to the
+	// "cloudevents.event_type" semantic conventions. It represents the
+	// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+	// that contains a value describing the type of event related to the
+	// originating occurrence.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'com.github.pull_request.opened',
+	// 'com.example.object.deleted.v2'
+	CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+
+	// CloudeventsEventSubjectKey is the attribute Key conforming to the
+	// "cloudevents.event_subject" semantic conventions. It represents the
+	// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+	// of the event in the context of the event producer (identified by
+	// source).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'mynewfile.jpg'
+	CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+// that uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+	return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+// that identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+	return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+	return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+// that contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+	return CloudeventsEventTypeKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+	return CloudeventsEventSubjectKey.String(val)
+}
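+
+// Example (illustrative only, not part of the upstream generated file): a
+// span processing a CloudEvent carries the two required attributes plus any
+// optional ones; span is an assumed trace.Span and the values are
+// placeholders:
+//
+//	span.SetAttributes(
+//		CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		CloudeventsEventSource("my-service"),
+//		CloudeventsEventType("com.example.object.deleted.v2"),
+//	)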
+
+// Semantic conventions for the OpenTracing Shim
+const (
+	// OpentracingRefTypeKey is the attribute Key conforming to the
+	// "opentracing.ref_type" semantic conventions. It represents the
+	// parent-child Reference type
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: The causal relationship between a child Span and a parent Span.
+	OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+	// The parent Span depends on the child Span in some capacity
+	OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+	// The parent Span does not depend in any way on the result of the child Span
+	OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
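+
+// Example (illustrative only, not part of the upstream generated file): the
+// OpenTracing shim records the reference type as a pre-built enum attribute
+// on an assumed trace.Span:
+//
+//	span.SetAttributes(OpentracingRefTypeChildOf)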
+
+// The attributes used to perform database client calls.
+const (
+	// DBSystemKey is the attribute Key conforming to the "db.system" semantic
+	// conventions. It represents an identifier for the database management
+	// system (DBMS) product being used. See below for a list of well-known
+	// identifiers.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	DBSystemKey = attribute.Key("db.system")
+
+	// DBConnectionStringKey is the attribute Key conforming to the
+	// "db.connection_string" semantic conventions. It represents the
+	// connection string used to connect to the database. It is recommended to
+	// remove embedded credentials.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+	DBConnectionStringKey = attribute.Key("db.connection_string")
+
+	// DBUserKey is the attribute Key conforming to the "db.user" semantic
+	// conventions. It represents the username for accessing the database.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'readonly_user', 'reporting_user'
+	DBUserKey = attribute.Key("db.user")
+
+	// DBJDBCDriverClassnameKey is the attribute Key conforming to the
+	// "db.jdbc.driver_classname" semantic conventions. It represents the
+	// fully-qualified class name of the [Java Database Connectivity
+	// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+	// driver used to connect.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'org.postgresql.Driver',
+	// 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+	DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+	// DBNameKey is the attribute Key conforming to the "db.name" semantic
+	// conventions. It represents the name of the database being accessed. For
+	// commands that switch the database, this should be set to the target
+	// database (even if the command fails).
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If applicable.)
+	// Stability: stable
+	// Examples: 'customers', 'main'
+	// Note: In some SQL databases, the database name to be used is called
+	// "schema name". In case there are multiple layers that could be
+	// considered for database name (e.g. Oracle instance name and schema
+	// name), the database name to be used is the more specific layer (e.g.
+	// Oracle schema name).
+	DBNameKey = attribute.Key("db.name")
+
+	// DBStatementKey is the attribute Key conforming to the "db.statement"
+	// semantic conventions. It represents the database statement being
+	// executed.
+	//
+	// Type: string
+	// RequirementLevel: Recommended (Should be collected by default only if
+	// there is sanitization that excludes sensitive information.)
+	// Stability: stable
+	// Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+	DBStatementKey = attribute.Key("db.statement")
+
+	// DBOperationKey is the attribute Key conforming to the "db.operation"
+	// semantic conventions. It represents the name of the operation being
+	// executed, e.g. the [MongoDB command
+	// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+	// such as `findAndModify`, or the SQL keyword.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If `db.statement` is not
+	// applicable.)
+	// Stability: stable
+	// Examples: 'findAndModify', 'HMSET', 'SELECT'
+	// Note: When setting this to an SQL keyword, it is not recommended to
+	// attempt any client-side parsing of `db.statement` just to get this
+	// property, but it should be set if the operation name is provided by the
+	// library being instrumented. If the SQL statement has an ambiguous
+	// operation, or performs more than one operation, this value may be
+	// omitted.
+	DBOperationKey = attribute.Key("db.operation")
+)
+
+var (
+	// Some other SQL database. Fallback only. See notes
+	DBSystemOtherSQL = DBSystemKey.String("other_sql")
+	// Microsoft SQL Server
+	DBSystemMSSQL = DBSystemKey.String("mssql")
+	// Microsoft SQL Server Compact
+	DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
+	// MySQL
+	DBSystemMySQL = DBSystemKey.String("mysql")
+	// Oracle Database
+	DBSystemOracle = DBSystemKey.String("oracle")
+	// IBM DB2
+	DBSystemDB2 = DBSystemKey.String("db2")
+	// PostgreSQL
+	DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+	// Amazon Redshift
+	DBSystemRedshift = DBSystemKey.String("redshift")
+	// Apache Hive
+	DBSystemHive = DBSystemKey.String("hive")
+	// Cloudscape
+	DBSystemCloudscape = DBSystemKey.String("cloudscape")
+	// HyperSQL DataBase
+	DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+	// Progress Database
+	DBSystemProgress = DBSystemKey.String("progress")
+	// SAP MaxDB
+	DBSystemMaxDB = DBSystemKey.String("maxdb")
+	// SAP HANA
+	DBSystemHanaDB = DBSystemKey.String("hanadb")
+	// Ingres
+	DBSystemIngres = DBSystemKey.String("ingres")
+	// FirstSQL
+	DBSystemFirstSQL = DBSystemKey.String("firstsql")
+	// EnterpriseDB
+	DBSystemEDB = DBSystemKey.String("edb")
+	// InterSystems Caché
+	DBSystemCache = DBSystemKey.String("cache")
+	// Adabas (Adaptable Database System)
+	DBSystemAdabas = DBSystemKey.String("adabas")
+	// Firebird
+	DBSystemFirebird = DBSystemKey.String("firebird")
+	// Apache Derby
+	DBSystemDerby = DBSystemKey.String("derby")
+	// FileMaker
+	DBSystemFilemaker = DBSystemKey.String("filemaker")
+	// Informix
+	DBSystemInformix = DBSystemKey.String("informix")
+	// InstantDB
+	DBSystemInstantDB = DBSystemKey.String("instantdb")
+	// InterBase
+	DBSystemInterbase = DBSystemKey.String("interbase")
+	// MariaDB
+	DBSystemMariaDB = DBSystemKey.String("mariadb")
+	// Netezza
+	DBSystemNetezza = DBSystemKey.String("netezza")
+	// Pervasive PSQL
+	DBSystemPervasive = DBSystemKey.String("pervasive")
+	// PointBase
+	DBSystemPointbase = DBSystemKey.String("pointbase")
+	// SQLite
+	DBSystemSqlite = DBSystemKey.String("sqlite")
+	// Sybase
+	DBSystemSybase = DBSystemKey.String("sybase")
+	// Teradata
+	DBSystemTeradata = DBSystemKey.String("teradata")
+	// Vertica
+	DBSystemVertica = DBSystemKey.String("vertica")
+	// H2
+	DBSystemH2 = DBSystemKey.String("h2")
+	// ColdFusion IMQ
+	DBSystemColdfusion = DBSystemKey.String("coldfusion")
+	// Apache Cassandra
+	DBSystemCassandra = DBSystemKey.String("cassandra")
+	// Apache HBase
+	DBSystemHBase = DBSystemKey.String("hbase")
+	// MongoDB
+	DBSystemMongoDB = DBSystemKey.String("mongodb")
+	// Redis
+	DBSystemRedis = DBSystemKey.String("redis")
+	// Couchbase
+	DBSystemCouchbase = DBSystemKey.String("couchbase")
+	// CouchDB
+	DBSystemCouchDB = DBSystemKey.String("couchdb")
+	// Microsoft Azure Cosmos DB
+	DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+	// Amazon DynamoDB
+	DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+	// Neo4j
+	DBSystemNeo4j = DBSystemKey.String("neo4j")
+	// Apache Geode
+	DBSystemGeode = DBSystemKey.String("geode")
+	// Elasticsearch
+	DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+	// Memcached
+	DBSystemMemcached = DBSystemKey.String("memcached")
+	// CockroachDB
+	DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+	// OpenSearch
+	DBSystemOpensearch = DBSystemKey.String("opensearch")
+	// ClickHouse
+	DBSystemClickhouse = DBSystemKey.String("clickhouse")
+	// Cloud Spanner
+	DBSystemSpanner = DBSystemKey.String("spanner")
+	// Trino
+	DBSystemTrino = DBSystemKey.String("trino")
+)
+
+// DBConnectionString returns an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+	return DBConnectionStringKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+	return DBUserKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+	return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this should be set to the target
+// database (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+	return DBNameKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+	return DBStatementKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+	return DBOperationKey.String(val)
+}
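+
+// Example (illustrative only, not part of the upstream generated file): a
+// database client span combines the required `db.system` enum with the
+// call-level attributes; span is an assumed trace.Span and the values are
+// placeholders:
+//
+//	span.SetAttributes(
+//		DBSystemPostgreSQL,
+//		DBName("customers"),
+//		DBStatement("SELECT * FROM wuser_table"),
+//		DBOperation("SELECT"),
+//	)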
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+	// DBMSSQLInstanceNameKey is the attribute Key conforming to the
+	// "db.mssql.instance_name" semantic conventions. It represents the
+	// Microsoft SQL Server [instance
+	// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+	// being connected to. This name is used to determine the port of a named
+	// instance.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'MSSQLSERVER'
+	// Note: If setting a `db.mssql.instance_name`, `server.port` is no longer
+	// required (but still recommended if non-standard).
+	DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// being connected to. This name is used to determine the port of a named
+// instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+	return DBMSSQLInstanceNameKey.String(val)
+}
+
+// Call-level attributes for Cassandra
+const (
+	// DBCassandraPageSizeKey is the attribute Key conforming to the
+	// "db.cassandra.page_size" semantic conventions. It represents the fetch
+	// size used for paging, i.e. how many rows will be returned at once.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 5000
+	DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+	// DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+	// "db.cassandra.consistency_level" semantic conventions. It represents the
+	// consistency level of the query. Based on consistency values from
+	// [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+	// DBCassandraTableKey is the attribute Key conforming to the
+	// "db.cassandra.table" semantic conventions. It represents the name of the
+	// primary table that the operation is acting upon, including the keyspace
+	// name (if applicable).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'mytable'
+	// Note: This mirrors the `db.sql.table` attribute but references Cassandra
+	// rather than SQL. It is not recommended to attempt any client-side
+	// parsing of `db.statement` just to get this property, but it should be
+	// set if it is provided by the library being instrumented. If the
+	// operation is acting upon an anonymous table, or more than one table,
+	// this value MUST NOT be set.
+	DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+	// DBCassandraIdempotenceKey is the attribute Key conforming to the
+	// "db.cassandra.idempotence" semantic conventions. It represents the
+	// whether or not the query is idempotent.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+	// DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+	// to the "db.cassandra.speculative_execution_count" semantic conventions.
+	// It represents the number of times a query was speculatively executed.
+	// Not set or `0` if the query was not executed speculatively.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 0, 2
+	DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+	// DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+	// of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+	DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+	// DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.dc" semantic conventions. It represents the
+	// data center of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'us-west-2'
+	DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+	// all
+	DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+	// each_quorum
+	DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+	// quorum
+	DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+	// local_quorum
+	DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+	// one
+	DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+	// two
+	DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+	// three
+	DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+	// local_one
+	DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+	// any
+	DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+	// serial
+	DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+	// local_serial
+	DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+	return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary table that the operation is acting upon, including the keyspace name
+// (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+	return DBCassandraTableKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+	return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+	return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+	return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+	return DBCassandraCoordinatorDCKey.String(val)
+}
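+
+// Illustrative sketch, not part of the generated conventions: assuming a
+// trace.Span named span, a Cassandra client instrumentation might record the
+// call-level attributes above as:
+//
+//	span.SetAttributes(
+//		DBCassandraConsistencyLevelLocalQuorum,
+//		DBCassandraPageSize(5000),
+//		DBCassandraTable("mykeyspace.mytable"),
+//		DBCassandraCoordinatorDC("us-west-2"),
+//	)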
+
+// Call-level attributes for Redis
+const (
+	// DBRedisDBIndexKey is the attribute Key conforming to the
+	// "db.redis.database_index" semantic conventions. It represents the index
+	// of the database being accessed as used in the [`SELECT`
+	// command](https://redis.io/commands/select), provided as an integer. To
+	// be used instead of the generic `db.name` attribute.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If other than the default
+	// database (`0`).)
+	// Stability: stable
+	// Examples: 0, 1, 15
+	DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+)
+
+// DBRedisDBIndex returns an attribute KeyValue conforming to the
+// "db.redis.database_index" semantic conventions. It represents the index of
+// the database being accessed as used in the [`SELECT`
+// command](https://redis.io/commands/select), provided as an integer. To be
+// used instead of the generic `db.name` attribute.
+func DBRedisDBIndex(val int) attribute.KeyValue {
+	return DBRedisDBIndexKey.Int(val)
+}
+
+// Call-level attributes for MongoDB
+const (
+	// DBMongoDBCollectionKey is the attribute Key conforming to the
+	// "db.mongodb.collection" semantic conventions. It represents the
+	// collection being accessed within the database stated in `db.name`.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'customers', 'products'
+	DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+)
+
+// DBMongoDBCollection returns an attribute KeyValue conforming to the
+// "db.mongodb.collection" semantic conventions. It represents the collection
+// being accessed within the database stated in `db.name`.
+func DBMongoDBCollection(val string) attribute.KeyValue {
+	return DBMongoDBCollectionKey.String(val)
+}
+
+// Call-level attributes for SQL databases
+const (
+	// DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
+	// semantic conventions. It represents the name of the primary table that
+	// the operation is acting upon, including the database name (if
+	// applicable).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'public.users', 'customers'
+	// Note: It is not recommended to attempt any client-side parsing of
+	// `db.statement` just to get this property, but it should be set if it is
+	// provided by the library being instrumented. If the operation is acting
+	// upon an anonymous table, or more than one table, this value MUST NOT be
+	// set.
+	DBSQLTableKey = attribute.Key("db.sql.table")
+)
+
+// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
+// semantic conventions. It represents the name of the primary table that the
+// operation is acting upon, including the database name (if applicable).
+func DBSQLTable(val string) attribute.KeyValue {
+	return DBSQLTableKey.String(val)
+}
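+
+// Illustrative sketch, not part of the generated conventions: for a SQL
+// query against a known table (trace.Span named span assumed):
+//
+//	span.SetAttributes(
+//		DBSQLTable("public.users"),
+//		DBOperation("SELECT"),
+//	)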
+
+// Call-level attributes for Cosmos DB.
+const (
+	// DBCosmosDBClientIDKey is the attribute Key conforming to the
+	// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+	// Cosmos client instance id.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
+	DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
+
+	// DBCosmosDBOperationTypeKey is the attribute Key conforming to the
+	// "db.cosmosdb.operation_type" semantic conventions. It represents the
+	// Cosmos DB operation type.
+	//
+	// Type: Enum
+	// RequirementLevel: ConditionallyRequired (when performing one of the
+	// operations in this list)
+	// Stability: stable
+	DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
+
+	// DBCosmosDBConnectionModeKey is the attribute Key conforming to the
+	// "db.cosmosdb.connection_mode" semantic conventions. It represents the
+	// Cosmos client connection mode.
+	//
+	// Type: Enum
+	// RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as
+	// default))
+	// Stability: stable
+	DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
+
+	// DBCosmosDBContainerKey is the attribute Key conforming to the
+	// "db.cosmosdb.container" semantic conventions. It represents the cosmos
+	// DB container name.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (if available)
+	// Stability: stable
+	// Examples: 'anystring'
+	DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container")
+
+	// DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+	// "db.cosmosdb.request_content_length" semantic conventions. It represents
+	// the request payload size in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+	// DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+	// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
+	// DB status code.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (if response was received)
+	// Stability: stable
+	// Examples: 200, 201
+	DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+	// DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+	// "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+	// Cosmos DB sub status code.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (when response was received and
+	// contained sub-code.)
+	// Stability: stable
+	// Examples: 1000, 1002
+	DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+
+	// DBCosmosDBRequestChargeKey is the attribute Key conforming to the
+	// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+	// consumed for that operation
+	//
+	// Type: double
+	// RequirementLevel: ConditionallyRequired (when available)
+	// Stability: stable
+	// Examples: 46.18, 1.0
+	DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+)
+
+var (
+	// invalid
+	DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+	// create
+	DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+	// patch
+	DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+	// read
+	DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+	// read_feed
+	DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+	// delete
+	DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+	// replace
+	DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+	// execute
+	DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+	// query
+	DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+	// head
+	DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+	// head_feed
+	DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+	// upsert
+	DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+	// batch
+	DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+	// query_plan
+	DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+	// execute_javascript
+	DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
+
+var (
+	// Gateway (HTTP) connections mode
+	DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+	// Direct connection
+	DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+	return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBContainer returns an attribute KeyValue conforming to the
+// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
+// container name.
+func DBCosmosDBContainer(val string) attribute.KeyValue {
+	return DBCosmosDBContainerKey.String(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes.
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+	return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+	return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+	return DBCosmosDBSubStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+// consumed for that operation
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+	return DBCosmosDBRequestChargeKey.Float64(val)
+}
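+
+// Illustrative sketch, not part of the generated conventions: assuming a
+// trace.Span named span, a Cosmos DB point read might be annotated as:
+//
+//	span.SetAttributes(
+//		DBCosmosDBOperationTypeRead,
+//		DBCosmosDBConnectionModeGateway,
+//		DBCosmosDBContainer("orders"),
+//		DBCosmosDBStatusCode(200),
+//		DBCosmosDBRequestCharge(1.0),
+//	)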
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
+// concepts.
+const (
+	// OTelStatusCodeKey is the attribute Key conforming to the
+	// "otel.status_code" semantic conventions. It represents the name of the
+	// code, either "OK" or "ERROR". MUST NOT be set if the status code is
+	// UNSET.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+	// OTelStatusDescriptionKey is the attribute Key conforming to the
+	// "otel.status_description" semantic conventions. It represents the
+	// description of the Status if it has a value, otherwise not set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'resource not found'
+	OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+	// The operation has been validated by an Application developer or Operator to have completed successfully
+	OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+	// The operation contains an error
+	OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+	return OTelStatusDescriptionKey.String(val)
+}
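+
+// Illustrative sketch, not part of the generated conventions: a non-OTLP
+// exporter mapping a span status (a Status value with Code and Description
+// fields, as in go.opentelemetry.io/otel/sdk/trace, with codes from
+// go.opentelemetry.io/otel/codes; attrs is a []attribute.KeyValue being
+// built) might look like:
+//
+//	switch status.Code {
+//	case codes.Ok:
+//		attrs = append(attrs, OTelStatusCodeOk)
+//	case codes.Error:
+//		attrs = append(attrs, OTelStatusCodeError,
+//			OTelStatusDescription(status.Description))
+//	}
+//
+// Nothing is added for codes.Unset, per the MUST NOT requirement above.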
+
+// This semantic convention describes an instance of a function that runs
+// without provisioning or managing of servers (also known as serverless
+// functions or Function as a Service (FaaS)) with spans.
+const (
+	// FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+	// semantic conventions. It represents the type of the trigger which caused
+	// this function invocation.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: For the server/consumer span on the incoming side,
+	// `faas.trigger` MUST be set.
+	//
+	// Clients invoking FaaS instances usually cannot set `faas.trigger`,
+	// since they would typically need to look in the payload to determine
+	// the event type. If clients set it, it should be the same as the
+	// trigger that the corresponding incoming span would have (i.e., this has
+	// nothing to do with the underlying transport used to make the API
+	// call to invoke the lambda, which is often HTTP).
+	FaaSTriggerKey = attribute.Key("faas.trigger")
+
+	// FaaSInvocationIDKey is the attribute Key conforming to the
+	// "faas.invocation_id" semantic conventions. It represents the invocation
+	// ID of the current function invocation.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+	FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+)
+
+var (
+	// A response to some data source operation such as a database or filesystem read/write
+	FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+	// To provide an answer to an inbound HTTP request
+	FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+	// A function is set to be executed when messages are sent to a messaging system
+	FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+	// A function is scheduled to be executed regularly
+	FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+	// If none of the others apply
+	FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+	return FaaSInvocationIDKey.String(val)
+}
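+
+// Illustrative sketch, not part of the generated conventions: the incoming
+// (server) span of an HTTP-triggered function, with the invocation ID taken
+// from the FaaS runtime (trace.Span named span assumed):
+//
+//	span.SetAttributes(
+//		FaaSTriggerHTTP,
+//		FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
+//	)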
+
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+	// FaaSDocumentCollectionKey is the attribute Key conforming to the
+	// "faas.document.collection" semantic conventions. It represents the name
+	// of the source on which the triggering operation was performed. For
+	// example, in Cloud Storage or S3 this corresponds to the bucket name,
+	// and in Cosmos DB to the database name.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'myBucketName', 'myDBName'
+	FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+	// FaaSDocumentOperationKey is the attribute Key conforming to the
+	// "faas.document.operation" semantic conventions. It represents the
+	// describes the type of the operation that was performed on the data.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+	// FaaSDocumentTimeKey is the attribute Key conforming to the
+	// "faas.document.time" semantic conventions. It represents a string
+	// containing the time when the data was accessed in the [ISO
+	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '2020-01-23T13:47:06Z'
+	FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+	// FaaSDocumentNameKey is the attribute Key conforming to the
+	// "faas.document.name" semantic conventions. It represents the document
+	// name/table subjected to the operation. For example, in Cloud Storage or
+	// S3 this is the name of the file, and in Cosmos DB the table name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'myFile.txt', 'myTableName'
+	FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+	// When a new object is created
+	FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+	// When an object is modified
+	FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+	// When an object is deleted
+	FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 this corresponds to the bucket name, and in Cosmos DB
+// to the database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+	return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+	return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// this is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+	return FaaSDocumentNameKey.String(val)
+}
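+
+// Illustrative sketch, not part of the generated conventions: a function
+// triggered by an object created in an S3 bucket might be annotated as
+// (trace.Span named span assumed):
+//
+//	span.SetAttributes(
+//		FaaSTriggerDatasource,
+//		FaaSDocumentOperationInsert,
+//		FaaSDocumentCollection("myBucketName"),
+//		FaaSDocumentName("myFile.txt"),
+//		FaaSDocumentTime("2020-01-23T13:47:06Z"),
+//	)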
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+	// FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+	// conventions. It represents a string containing the function invocation
+	// time in the [ISO
+	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '2020-01-23T13:47:06Z'
+	FaaSTimeKey = attribute.Key("faas.time")
+
+	// FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+	// conventions. It represents a string containing the schedule period as
+	// [Cron
+	// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '0/5 * * * ? *'
+	FaaSCronKey = attribute.Key("faas.cron")
+)
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+	return FaaSTimeKey.String(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+	return FaaSCronKey.String(val)
+}
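+
+// Illustrative sketch, not part of the generated conventions: a
+// cron-scheduled invocation (trace.Span named span assumed):
+//
+//	span.SetAttributes(
+//		FaaSTriggerTimer,
+//		FaaSTime("2020-01-23T13:47:06Z"),
+//		FaaSCron("0/5 * * * ? *"),
+//	)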
+
+// Contains additional attributes for incoming FaaS spans.
+const (
+	// FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+	// semantic conventions. It represents a boolean that is true if the
+	// serverless function is executed for the first time (aka cold-start).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+	return FaaSColdstartKey.Bool(val)
+}
+
+// Contains additional attributes for outgoing FaaS spans.
+const (
+	// FaaSInvokedNameKey is the attribute Key conforming to the
+	// "faas.invoked_name" semantic conventions. It represents the name of the
+	// invoked function.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'my-function'
+	// Note: SHOULD be equal to the `faas.name` resource attribute of the
+	// invoked function.
+	FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+	// FaaSInvokedProviderKey is the attribute Key conforming to the
+	// "faas.invoked_provider" semantic conventions. It represents the cloud
+	// provider of the invoked function.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	// Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+	// invoked function.
+	FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+	// FaaSInvokedRegionKey is the attribute Key conforming to the
+	// "faas.invoked_region" semantic conventions. It represents the cloud
+	// region of the invoked function.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (For some cloud providers, like
+	// AWS or GCP, the region in which a function is hosted is essential to
+	// uniquely identify the function and also part of its endpoint. Since it's
+	// part of the endpoint being called, the region is always known to
+	// clients. In these cases, `faas.invoked_region` MUST be set accordingly.
+	// If the region is unknown to the client or not required for identifying
+	// the invoked function, setting `faas.invoked_region` is optional.)
+	// Stability: stable
+	// Examples: 'eu-central-1'
+	// Note: SHOULD be equal to the `cloud.region` resource attribute of the
+	// invoked function.
+	FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+)
+
+var (
+	// Alibaba Cloud
+	FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+	// Microsoft Azure
+	FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+	// Google Cloud Platform
+	FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+	// Tencent Cloud
+	FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+	return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+	return FaaSInvokedRegionKey.String(val)
+}
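+
+// Illustrative sketch, not part of the generated conventions: a client span
+// for invoking a function hosted on AWS Lambda in eu-central-1 (trace.Span
+// named span assumed):
+//
+//	span.SetAttributes(
+//		FaaSInvokedName("my-function"),
+//		FaaSInvokedProviderAWS,
+//		FaaSInvokedRegion("eu-central-1"),
+//	)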
+
+// Operations that access some remote service.
+const (
+	// PeerServiceKey is the attribute Key conforming to the "peer.service"
+	// semantic conventions. It represents the
+	// [`service.name`](/docs/resource/README.md#service) of the remote
+	// service. SHOULD be equal to the actual `service.name` resource attribute
+	// of the remote service if any.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'AuthTokenCache'
+	PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](/docs/resource/README.md#service) of the remote service.
+// SHOULD be equal to the actual `service.name` resource attribute of the
+// remote service if any.
+func PeerService(val string) attribute.KeyValue {
+	return PeerServiceKey.String(val)
+}
+
+// These attributes may be used for any operation with an authenticated and/or
+// authorized enduser.
+const (
+	// EnduserIDKey is the attribute Key conforming to the "enduser.id"
+	// semantic conventions. It represents the username or client_id extracted
+	// from the access token or
+	// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+	// in the inbound request from outside the system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'username'
+	EnduserIDKey = attribute.Key("enduser.id")
+
+	// EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+	// semantic conventions. It represents the actual/assumed role the client
+	// is making the request under, extracted from a token or application
+	// security context.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'admin'
+	EnduserRoleKey = attribute.Key("enduser.role")
+
+	// EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+	// semantic conventions. It represents the scopes or granted authorities
+	// the client currently possesses, extracted from a token or application
+	// security context. The value would come from the scope associated with an
+	// [OAuth 2.0 Access
+	// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+	// value in a [SAML 2.0
+	// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'read:message, write:files'
+	EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+	return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under, extracted from a token or
+// application security context.
+func EnduserRole(val string) attribute.KeyValue {
+	return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses, extracted from a token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+	return EnduserScopeKey.String(val)
+}
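+
+// Illustrative sketch, not part of the generated conventions: after
+// validating an access token, a server might record the authenticated caller
+// as follows (trace.Span named span assumed; the claim values are
+// hypothetical):
+//
+//	span.SetAttributes(
+//		EnduserID("username"),
+//		EnduserRole("admin"),
+//		EnduserScope("read:message write:files"),
+//	)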
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+	// ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+	// conventions. It represents the current "managed" thread ID (as opposed
+	// to OS thread ID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 42
+	ThreadIDKey = attribute.Key("thread.id")
+
+	// ThreadNameKey is the attribute Key conforming to the "thread.name"
+	// semantic conventions. It represents the current thread name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'main'
+	ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+	return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+	return ThreadNameKey.String(val)
+}
+
+// These attributes allow reporting the unit of code that produced the span
+// and therefore provide more context about it.
+const (
+	// CodeFunctionKey is the attribute Key conforming to the "code.function"
+	// semantic conventions. It represents the method or function name, or
+	// equivalent (usually rightmost part of the code unit's name).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'serveRequest'
+	CodeFunctionKey = attribute.Key("code.function")
+
+	// CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+	// semantic conventions. It represents the "namespace" within which
+	// `code.function` is defined. Usually the qualified class or module name,
+	// such that `code.namespace` + some separator + `code.function` form a
+	// unique identifier for the code unit.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'com.example.MyHTTPService'
+	CodeNamespaceKey = attribute.Key("code.namespace")
+
+	// CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+	// semantic conventions. It represents the source code file name that
+	// identifies the code unit as uniquely as possible (preferably an absolute
+	// file path).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/usr/local/MyApplication/content_root/app/index.php'
+	CodeFilepathKey = attribute.Key("code.filepath")
+
+	// CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+	// semantic conventions. It represents the line number in `code.filepath`
+	// best representing the operation. It SHOULD point within the code unit
+	// named in `code.function`.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 42
+	CodeLineNumberKey = attribute.Key("code.lineno")
+
+	// CodeColumnKey is the attribute Key conforming to the "code.column"
+	// semantic conventions. It represents the column number in `code.filepath`
+	// best representing the operation. It SHOULD point within the code unit
+	// named in `code.function`.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 16
+	CodeColumnKey = attribute.Key("code.column")
+)
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+	return CodeFunctionKey.String(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+	return CodeNamespaceKey.String(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+	return CodeFilepathKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+	return CodeLineNumberKey.Int(val)
+}
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+	return CodeColumnKey.Int(val)
+}
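+
+// Illustrative sketch, not part of the generated conventions: in Go the
+// code.* attributes can be derived from the runtime package (trace.Span
+// named span assumed). Note that runtime.FuncForPC reports a fully qualified
+// name (package path included); splitting it between code.namespace and
+// code.function is left to the instrumentation:
+//
+//	if pc, file, line, ok := runtime.Caller(0); ok {
+//		span.SetAttributes(
+//			CodeFunction(runtime.FuncForPC(pc).Name()),
+//			CodeFilepath(file),
+//			CodeLineNumber(line),
+//		)
+//	}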
+
+// Semantic Convention for HTTP Client
+const (
+	// HTTPResendCountKey is the attribute Key conforming to the
+	// "http.resend_count" semantic conventions. It represents the ordinal
+	// number of the request resending attempt (for any reason, including
+	// redirects).
+	//
+	// Type: int
+	// RequirementLevel: Recommended (if and only if request was retried.)
+	// Stability: stable
+	// Examples: 3
+	// Note: The resend count SHOULD be updated each time an HTTP request gets
+	// resent by the client, regardless of the cause of the resending
+	// (e.g. redirection, authorization failure, 503 Server Unavailable,
+	// network issues, or any other).
+	HTTPResendCountKey = attribute.Key("http.resend_count")
+)
+
+// HTTPResendCount returns an attribute KeyValue conforming to the
+// "http.resend_count" semantic conventions. It represents the ordinal number
+// of the request resending attempt (for any reason, including redirects).
+func HTTPResendCount(val int) attribute.KeyValue {
+	return HTTPResendCountKey.Int(val)
+}
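+
+// Illustrative sketch, not part of the generated conventions: an HTTP client
+// retry loop recording the attempt ordinal on every resent request, with one
+// span per attempt (tracer, ctx, and maxRetries assumed):
+//
+//	for attempt := 0; attempt <= maxRetries; attempt++ {
+//		_, span := tracer.Start(ctx, "HTTP GET")
+//		if attempt > 0 {
+//			span.SetAttributes(HTTPResendCount(attempt))
+//		}
+//		// ... send the request; break on success ...
+//		span.End()
+//	}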
+
+// The `aws` conventions apply to operations using the AWS SDK. They map
+// request or response parameters in AWS SDK API calls to attributes on a Span.
+// The conventions have been collected over time based on feedback from AWS
+// users of tracing and will continue to evolve as new interesting conventions
+// are found.
+// Some descriptions are also provided for populating general OpenTelemetry
+// semantic conventions based on these APIs.
+const (
+	// AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+	// semantic conventions. It represents the AWS request ID as returned in
+	// the response headers `x-amz-request-id` or `x-amz-requestid`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
+	AWSRequestIDKey = attribute.Key("aws.request_id")
+)
+
+// AWSRequestID returns an attribute KeyValue conforming to the
+// "aws.request_id" semantic conventions. It represents the AWS request ID as
+// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
+func AWSRequestID(val string) attribute.KeyValue {
+	return AWSRequestIDKey.String(val)
+}
+
+// Attributes that exist for multiple DynamoDB request types.
+const (
+	// AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+	// "aws.dynamodb.table_names" semantic conventions. It represents the keys
+	// in the `RequestItems` object field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Users', 'Cats'
+	AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+	// AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+	// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+	// JSON-serialized value of each item in the `ConsumedCapacity` response
+	// field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+	// "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+	// { "CapacityUnits": number, "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+	// { "CapacityUnits": number, "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number }, "TableName": "string",
+	// "WriteCapacityUnits": number }'
+	AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+	// AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+	// the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+	// represents the JSON-serialized value of the `ItemCollectionMetrics`
+	// response field.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+	// blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+	// "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+	// "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+	// "SizeEstimateRangeGB": [ number ] } ] }'
+	AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+	// AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+	// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+	// represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+	// request parameter.
+	//
+	// Type: double
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 1.0, 2.0
+	AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+	// AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+	// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+	// It represents the value of the
+	// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+	//
+	// Type: double
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 1.0, 2.0
+	AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+	// AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+	// "aws.dynamodb.consistent_read" semantic conventions. It represents the
+	// value of the `ConsistentRead` request parameter.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+	// AWSDynamoDBProjectionKey is the attribute Key conforming to the
+	// "aws.dynamodb.projection" semantic conventions. It represents the value
+	// of the `ProjectionExpression` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+	// RelatedItems, ProductReviews'
+	AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+	// AWSDynamoDBLimitKey is the attribute Key conforming to the
+	// "aws.dynamodb.limit" semantic conventions. It represents the value of
+	// the `Limit` request parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 10
+	AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+	// AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+	// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+	// value of the `AttributesToGet` request parameter.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'lives', 'id'
+	AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+	// AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+	// "aws.dynamodb.index_name" semantic conventions. It represents the value
+	// of the `IndexName` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'name_to_group'
+	AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+	// AWSDynamoDBSelectKey is the attribute Key conforming to the
+	// "aws.dynamodb.select" semantic conventions. It represents the value of
+	// the `Select` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'ALL_ATTRIBUTES', 'COUNT'
+	AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+)
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+	return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+	return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+	return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+	return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+	return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+	return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+	return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+	return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+	return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+	return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+	return AWSDynamoDBSelectKey.String(val)
+}
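+
+// Illustrative sketch, not part of the generated conventions: a DynamoDB
+// GetItem call (trace.Span named span assumed) might carry:
+//
+//	span.SetAttributes(
+//		AWSDynamoDBTableNames("Users"),
+//		AWSDynamoDBConsistentRead(true),
+//		AWSDynamoDBProjection("Title, Price, Color"),
+//	)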
+
+// DynamoDB.CreateTable
+const (
+	// AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+	// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+	// represents the JSON-serialized value of each item of the
+	// `GlobalSecondaryIndexes` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+	// "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+	// "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+	// "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+	AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+	// AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+	// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+	// represents the JSON-serialized value of each item of the
+	// `LocalSecondaryIndexes` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "IndexARN": "string", "IndexName": "string",
+	// "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+	AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+	return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+	return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+	// AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+	// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+	// the value of the `ExclusiveStartTableName` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Users', 'CatsTable'
+	AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+	// AWSDynamoDBTableCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.table_count" semantic conventions. It represents the the
+	// number of items in the `TableNames` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 20
+	AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+	return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+	return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// DynamoDB.Query
+const (
+	// AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+	// "aws.dynamodb.scan_forward" semantic conventions. It represents the
+	// value of the `ScanIndexForward` request parameter.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+	return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// DynamoDB.Scan
+const (
+	// AWSDynamoDBSegmentKey is the attribute Key conforming to the
+	// "aws.dynamodb.segment" semantic conventions. It represents the value of
+	// the `Segment` request parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 10
+	AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+	// AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+	// "aws.dynamodb.total_segments" semantic conventions. It represents the
+	// value of the `TotalSegments` request parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 100
+	AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+
+	// AWSDynamoDBCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.count" semantic conventions. It represents the value of
+	// the `Count` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 10
+	AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+	// AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.scanned_count" semantic conventions. It represents the
+	// value of the `ScannedCount` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 50
+	AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+)
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+	return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+	return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+	return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+	return AWSDynamoDBScannedCountKey.Int(val)
+}
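+
+// Illustrative sketch, not part of the generated conventions: one segment of
+// a parallel DynamoDB Scan and its result counts (trace.Span named span
+// assumed):
+//
+//	span.SetAttributes(
+//		AWSDynamoDBSegment(10),
+//		AWSDynamoDBTotalSegments(100),
+//		AWSDynamoDBCount(10),
+//		AWSDynamoDBScannedCount(50),
+//	)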
+
+// DynamoDB.UpdateTable
+const (
+	// AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+	// the "aws.dynamodb.attribute_definitions" semantic conventions. It
+	// represents the JSON-serialized value of each item in the
+	// `AttributeDefinitions` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+	AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+	// AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+	// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+	// conventions. It represents the JSON-serialized value of each item in the
+	// `GlobalSecondaryIndexUpdates` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+	// "ProvisionedThroughput": { "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number } }'
+	AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+	return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+	return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Attributes that exist for S3 request types.
+const (
+	// AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+	// semantic conventions. It represents the S3 bucket name the request
+	// refers to. Corresponds to the `--bucket` parameter of the [S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+	// operations.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'some-bucket-name'
+	// Note: The `bucket` attribute is applicable to all S3 operations that
+	// reference a bucket, i.e. that require the bucket name as a mandatory
+	// parameter.
+	// This applies to almost all S3 operations except `list-buckets`.
+	AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+	// AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+	// conventions. It represents the S3 object key the request refers to.
+	// Corresponds to the `--key` parameter of the [S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+	// operations.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'someFile.yml'
+	// Note: The `key` attribute is applicable to all object-related S3
+	// operations, i.e. that require the object key as a mandatory parameter.
+	// This applies in particular to the following operations:
+	//
+	// -
+	// [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+	// -
+	// [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+	// -
+	// [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
+	// -
+	// [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
+	// -
+	// [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
+	// -
+	// [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
+	// -
+	// [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
+	// -
+	// [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+	// -
+	// [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+	// -
+	// [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
+	// -
+	// [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+	// -
+	// [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+	// -
+	// [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+	AWSS3KeyKey = attribute.Key("aws.s3.key")
+
+	// AWSS3CopySourceKey is the attribute Key conforming to the
+	// "aws.s3.copy_source" semantic conventions. It represents the source
+	// object (in the form `bucket`/`key`) for the copy operation.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'someFile.yml'
+	// Note: The `copy_source` attribute applies to S3 copy operations and
+	// corresponds to the `--copy-source` parameter
+	// of the [copy-object operation within the S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
+	// This applies in particular to the following operations:
+	//
+	// -
+	// [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+	// -
+	// [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+	AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+	// AWSS3UploadIDKey is the attribute Key conforming to the
+	// "aws.s3.upload_id" semantic conventions. It represents the upload ID
+	// that identifies the multipart upload.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
+	// Note: The `upload_id` attribute applies to S3 multipart-upload
+	// operations and corresponds to the `--upload-id` parameter
+	// of the [S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+	// multipart operations.
+	// This applies in particular to the following operations:
+	//
+	// -
+	// [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+	// -
+	// [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+	// -
+	// [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+	// -
+	// [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+	// -
+	// [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+	AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+
+	// AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+	// semantic conventions. It represents the delete request container that
+	// specifies the objects to be deleted.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples:
+	// 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
+	// Note: The `delete` attribute is only applicable to the
+	// [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+	// operation.
+	// The `delete` attribute corresponds to the `--delete` parameter of the
+	// [delete-objects operation within the S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
+	AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+	// AWSS3PartNumberKey is the attribute Key conforming to the
+	// "aws.s3.part_number" semantic conventions. It represents the part number
+	// of the part being uploaded in a multipart-upload operation. This is a
+	// positive integer between 1 and 10,000.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 3456
+	// Note: The `part_number` attribute is only applicable to the
+	// [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+	// and
+	// [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+	// operations.
+	// The `part_number` attribute corresponds to the `--part-number` parameter
+	// of the
+	// [upload-part operation within the S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
+	AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+)
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the
+// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
+// request refers to. Corresponds to the `--bucket` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Bucket(val string) attribute.KeyValue {
+	return AWSS3BucketKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
+// semantic conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Key(val string) attribute.KeyValue {
+	return AWSS3KeyKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object
+// (in the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+	return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+	return AWSS3UploadIDKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the
+// "aws.s3.delete" semantic conventions. It represents the delete request
+// container that specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+	return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+	return AWSS3PartNumberKey.Int(val)
+}
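+
+// Usage sketch (illustrative, not part of the generated conventions): for an
+// upload-part call, the S3 helpers above might be combined on a single span,
+// using the example values documented above:
+//
+//	span.SetAttributes(
+//		AWSS3Bucket("some-bucket-name"),
+//		AWSS3Key("someFile.yml"),
+//		AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
+//		AWSS3PartNumber(3456),
+//	)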
+
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+	// GraphqlOperationNameKey is the attribute Key conforming to the
+	// "graphql.operation.name" semantic conventions. It represents the name of
+	// the operation being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'findBookByID'
+	GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+	// GraphqlOperationTypeKey is the attribute Key conforming to the
+	// "graphql.operation.type" semantic conventions. It represents the type of
+	// the operation being executed.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'query', 'mutation', 'subscription'
+	GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+
+	// GraphqlDocumentKey is the attribute Key conforming to the
+	// "graphql.document" semantic conventions. It represents the GraphQL
+	// document being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+	// Note: The value may be sanitized to exclude sensitive information.
+	GraphqlDocumentKey = attribute.Key("graphql.document")
+)
+
+var (
+	// GraphQL query
+	GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+	// GraphQL mutation
+	GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+	// GraphQL subscription
+	GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+	return GraphqlOperationNameKey.String(val)
+}
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+	return GraphqlDocumentKey.String(val)
+}
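+
+// Usage sketch (illustrative, not part of the generated conventions): a
+// GraphQL server instrumentation might record an operation on its span as:
+//
+//	span.SetAttributes(
+//		GraphqlOperationName("findBookByID"),
+//		GraphqlOperationTypeQuery, // predefined enum value for "query"
+//		GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
+//	)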
+
+// General attributes used in messaging systems.
+const (
+	// MessagingSystemKey is the attribute Key conforming to the
+	// "messaging.system" semantic conventions. It represents a string
+	// identifying the messaging system.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
+	MessagingSystemKey = attribute.Key("messaging.system")
+
+	// MessagingOperationKey is the attribute Key conforming to the
+	// "messaging.operation" semantic conventions. It represents a string
+	// identifying the kind of messaging operation as defined in the [Operation
+	// names](#operation-names) section above.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	// Note: If a custom value is used, it MUST be of low cardinality.
+	MessagingOperationKey = attribute.Key("messaging.operation")
+
+	// MessagingBatchMessageCountKey is the attribute Key conforming to the
+	// "messaging.batch.message_count" semantic conventions. It represents the
+	// number of messages sent, received, or processed in the scope of the
+	// batching operation.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If the span describes an
+	// operation on a batch of messages.)
+	// Stability: stable
+	// Examples: 0, 1, 2
+	// Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+	// spans that operate with a single message. When a messaging client
+	// library supports both batch and single-message API for the same
+	// operation, instrumentations SHOULD use `messaging.batch.message_count`
+	// for batching APIs and SHOULD NOT use it for single-message APIs.
+	MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+	// MessagingClientIDKey is the attribute Key conforming to the
+	// "messaging.client_id" semantic conventions. It represents a unique
+	// identifier for the client that consumes or produces a message.
+	//
+	// Type: string
+	// RequirementLevel: Recommended (If a client id is available)
+	// Stability: stable
+	// Examples: 'client-5', 'myhost@8742@s8083jm'
+	MessagingClientIDKey = attribute.Key("messaging.client_id")
+)
+
+var (
+	// publish
+	MessagingOperationPublish = MessagingOperationKey.String("publish")
+	// receive
+	MessagingOperationReceive = MessagingOperationKey.String("receive")
+	// process
+	MessagingOperationProcess = MessagingOperationKey.String("process")
+)
+
+// MessagingSystem returns an attribute KeyValue conforming to the
+// "messaging.system" semantic conventions. It represents a string identifying
+// the messaging system.
+func MessagingSystem(val string) attribute.KeyValue {
+	return MessagingSystemKey.String(val)
+}
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+	return MessagingBatchMessageCountKey.Int(val)
+}
+
+// MessagingClientID returns an attribute KeyValue conforming to the
+// "messaging.client_id" semantic conventions. It represents a unique
+// identifier for the client that consumes or produces a message.
+func MessagingClientID(val string) attribute.KeyValue {
+	return MessagingClientIDKey.String(val)
+}
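+
+// Usage sketch (illustrative, not part of the generated conventions): a
+// producer-side instrumentation could combine the required system and
+// operation attributes with the recommended client id:
+//
+//	span.SetAttributes(
+//		MessagingSystem("kafka"),
+//		MessagingOperationPublish, // predefined enum value for "publish"
+//		MessagingClientID("client-5"),
+//	)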
+
+// Semantic conventions for remote procedure calls.
+const (
+	// RPCSystemKey is the attribute Key conforming to the "rpc.system"
+	// semantic conventions. It represents a string identifying the remoting
+	// system. See below for a list of well-known identifiers.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	RPCSystemKey = attribute.Key("rpc.system")
+
+	// RPCServiceKey is the attribute Key conforming to the "rpc.service"
+	// semantic conventions. It represents the full (logical) name of the
+	// service being called, including its package name, if applicable.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'myservice.EchoService'
+	// Note: This is the logical name of the service from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// class. The `code.namespace` attribute may be used to store the latter
+	// (despite the attribute name, it may include a class name; e.g., class
+	// with method actually executing the call on the server side, RPC client
+	// stub class on the client side).
+	RPCServiceKey = attribute.Key("rpc.service")
+
+	// RPCMethodKey is the attribute Key conforming to the "rpc.method"
+	// semantic conventions. It represents the name of the (logical) method
+	// being called, which must be equal to the $method part in the span name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'exampleMethod'
+	// Note: This is the logical name of the method from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// method/function. The `code.function` attribute may be used to store the
+	// latter (e.g., method actually executing the call on the server side, RPC
+	// client stub method on the client side).
+	RPCMethodKey = attribute.Key("rpc.method")
+)
+
+var (
+	// gRPC
+	RPCSystemGRPC = RPCSystemKey.String("grpc")
+	// Java RMI
+	RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+	// .NET WCF
+	RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+	// Apache Dubbo
+	RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+	// Connect RPC
+	RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+	return RPCServiceKey.String(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, which must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+	return RPCMethodKey.String(val)
+}
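+
+// Usage sketch (illustrative, not part of the generated conventions): for a
+// span named "myservice.EchoService/exampleMethod", the matching attributes
+// would be:
+//
+//	span.SetAttributes(
+//		RPCSystemGRPC, // predefined enum value for "grpc"
+//		RPCService("myservice.EchoService"),
+//		RPCMethod("exampleMethod"),
+//	)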
+
+// Tech-specific attributes for gRPC.
+const (
+	// RPCGRPCStatusCodeKey is the attribute Key conforming to the
+	// "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+	// status
+	// code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+	// the gRPC request.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+var (
+	// OK
+	RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+	// CANCELLED
+	RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+	// UNKNOWN
+	RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+	// INVALID_ARGUMENT
+	RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+	// DEADLINE_EXCEEDED
+	RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+	// NOT_FOUND
+	RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+	// ALREADY_EXISTS
+	RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+	// PERMISSION_DENIED
+	RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+	// RESOURCE_EXHAUSTED
+	RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+	// FAILED_PRECONDITION
+	RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+	// ABORTED
+	RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+	// OUT_OF_RANGE
+	RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+	// UNIMPLEMENTED
+	RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+	// INTERNAL
+	RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+	// UNAVAILABLE
+	RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+	// DATA_LOSS
+	RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+	// UNAUTHENTICATED
+	RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
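+
+// Usage sketch (illustrative, not part of the generated conventions): the
+// integer values above mirror google.golang.org/grpc/codes, so an interceptor
+// can derive the attribute directly from an error via the status package:
+//
+//	code := status.Code(err) // google.golang.org/grpc/status
+//	span.SetAttributes(RPCGRPCStatusCodeKey.Int(int(code)))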
+
+// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
+const (
+	// RPCJsonrpcVersionKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+	// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+	// does not specify this, the value can be omitted.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If other than the default
+	// version (`1.0`))
+	// Stability: stable
+	// Examples: '2.0', '1.0'
+	RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+	// RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+	// property of request or response. Since the protocol allows the id to be
+	// an int, a string, `null`, or missing (for notifications), the value is
+	// expected to be cast to a string for simplicity. Use an empty string for
+	// a `null` value, and omit the attribute entirely for a notification.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10', 'request-7', ''
+	RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+	// RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+	// `error.code` property of response if it is an error response.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If response is not successful.)
+	// Stability: stable
+	// Examples: -32700, 100
+	RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+	// RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+	// `error.message` property of response if it is an error response.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Parse error', 'User already exists'
+	RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+)
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// does not specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+	return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of request or response. Since the protocol allows the id to be an
+// int, a string, `null`, or missing (for notifications), the value is expected
+// to be cast to a string for simplicity. Use an empty string for a `null`
+// value, and omit the attribute entirely for a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+	return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+	return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+	return RPCJsonrpcErrorMessageKey.String(val)
+}
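+
+// Usage sketch (illustrative, not part of the generated conventions): for a
+// failed JSON-RPC 2.0 call, the helpers above could be combined as:
+//
+//	span.SetAttributes(
+//		RPCJsonrpcVersion("2.0"),
+//		RPCJsonrpcRequestID("10"),
+//		RPCJsonrpcErrorCode(-32700),
+//		RPCJsonrpcErrorMessage("Parse error"),
+//	)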
+
+// Tech-specific attributes for Connect RPC.
+const (
+	// RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+	// "rpc.connect_rpc.error_code" semantic conventions. It represents the
+	// [error codes](https://connect.build/docs/protocol/#error-codes) of the
+	// Connect request. Error codes are always string values.
+	//
+	// Type: Enum
+	// RequirementLevel: ConditionallyRequired (If response is not successful
+	// and if error code available.)
+	// Stability: stable
+	RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+)
+
+var (
+	// cancelled
+	RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+	// unknown
+	RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+	// invalid_argument
+	RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+	// deadline_exceeded
+	RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+	// not_found
+	RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+	// already_exists
+	RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+	// permission_denied
+	RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+	// resource_exhausted
+	RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+	// failed_precondition
+	RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+	// aborted
+	RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+	// out_of_range
+	RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+	// unimplemented
+	RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+	// internal
+	RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+	// unavailable
+	RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+	// data_loss
+	RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+	// unauthenticated
+	RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
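+
+// Usage sketch (illustrative, not part of the generated conventions): Connect
+// error codes are plain strings, so an instrumentation can either use one of
+// the predefined values above or build the attribute from the code reported by
+// the client library:
+//
+//	span.SetAttributes(RPCConnectRPCErrorCodeUnavailable)
+//	// or, from a raw code string:
+//	span.SetAttributes(RPCConnectRPCErrorCodeKey.String("unavailable"))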
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go
new file mode 100644
index 0000000000000000000000000000000000000000..31726598d61822db6c729b6ba61a1669d8252c89
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go
@@ -0,0 +1,4398 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Describes FaaS attributes.
+const (
+	// FaaSInvokedNameKey is the attribute Key conforming to the
+	// "faas.invoked_name" semantic conventions. It represents the name of the
+	// invoked function.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'my-function'
+	// Note: SHOULD be equal to the `faas.name` resource attribute of the
+	// invoked function.
+	FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+	// FaaSInvokedProviderKey is the attribute Key conforming to the
+	// "faas.invoked_provider" semantic conventions. It represents the cloud
+	// provider of the invoked function.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+	// invoked function.
+	FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+	// FaaSInvokedRegionKey is the attribute Key conforming to the
+	// "faas.invoked_region" semantic conventions. It represents the cloud
+	// region of the invoked function.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (For some cloud providers, like
+	// AWS or GCP, the region in which a function is hosted is essential to
+	// uniquely identify the function and also part of its endpoint. Since it's
+	// part of the endpoint being called, the region is always known to
+	// clients. In these cases, `faas.invoked_region` MUST be set accordingly.
+	// If the region is unknown to the client or not required for identifying
+	// the invoked function, setting `faas.invoked_region` is optional.)
+	// Stability: experimental
+	// Examples: 'eu-central-1'
+	// Note: SHOULD be equal to the `cloud.region` resource attribute of the
+	// invoked function.
+	FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+	// FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+	// semantic conventions. It represents the type of the trigger which caused
+	// this function invocation.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	FaaSTriggerKey = attribute.Key("faas.trigger")
+)
+
+var (
+	// Alibaba Cloud
+	FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+	// Microsoft Azure
+	FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+	// Google Cloud Platform
+	FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+	// Tencent Cloud
+	FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+var (
+	// A response to some data source operation such as a database or filesystem read/write
+	FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+	// To provide an answer to an inbound HTTP request
+	FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+	// A function is set to be executed when messages are sent to a messaging system
+	FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+	// A function is scheduled to be executed regularly
+	FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+	// If none of the others apply
+	FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+	return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+	return FaaSInvokedRegionKey.String(val)
+}
+
+// Attributes for Events represented using Log Records.
+const (
+	// EventNameKey is the attribute Key conforming to the "event.name"
+	// semantic conventions. It identifies the class / type of an event.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'browser.mouse.click', 'device.app.lifecycle'
+	// Note: Event names are subject to the same rules as [attribute
+	// names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.26.0/specification/common/attribute-naming.md).
+	// Notably, event names are namespaced to avoid collisions and provide a
+	// clean separation of semantics for events in separate domains like
+	// browser, mobile, and kubernetes.
+	EventNameKey = attribute.Key("event.name")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It identifies the class / type of an event.
+func EventName(val string) attribute.KeyValue {
+	return EventNameKey.String(val)
+}
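+
+// Usage sketch (illustrative, not part of the generated conventions): the
+// event name is attached to the log record that carries the event, e.g.
+// collected into a KeyValue slice for whichever log bridge is in use:
+//
+//	attrs := []attribute.KeyValue{EventName("device.app.lifecycle")}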
+
+// The attributes described in this section are rather generic. They may be
+// used in any Log Record they apply to.
+const (
+	// LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+	// semantic conventions. It represents a unique identifier for the Log
+	// Record.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+	// Note: If an id is provided, other log records with the same id will be
+	// considered duplicates and can be removed safely. This means that two
+	// distinguishable log records MUST have different values.
+	// The id MAY be a [Universally Unique Lexicographically Sortable
+	// Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+	// (e.g. UUID) may be used as needed.
+	LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+	return LogRecordUIDKey.String(val)
+}
+
+// Describes Log attributes
+const (
+	// LogIostreamKey is the attribute Key conforming to the "log.iostream"
+	// semantic conventions. It represents the stream associated with the log.
+	// See below for a list of well-known values.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	LogIostreamKey = attribute.Key("log.iostream")
+)
+
+var (
+	// Logs from stdout stream
+	LogIostreamStdout = LogIostreamKey.String("stdout")
+	// Events from stderr stream
+	LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// A file to which log was emitted.
+const (
+	// LogFileNameKey is the attribute Key conforming to the "log.file.name"
+	// semantic conventions. It represents the basename of the file.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: experimental
+	// Examples: 'audit.log'
+	LogFileNameKey = attribute.Key("log.file.name")
+
+	// LogFileNameResolvedKey is the attribute Key conforming to the
+	// "log.file.name_resolved" semantic conventions. It represents the
+	// basename of the file, with symlinks resolved.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'uuid.log'
+	LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
+
+	// LogFilePathKey is the attribute Key conforming to the "log.file.path"
+	// semantic conventions. It represents the full path to the file.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/var/log/mysql/audit.log'
+	LogFilePathKey = attribute.Key("log.file.path")
+
+	// LogFilePathResolvedKey is the attribute Key conforming to the
+	// "log.file.path_resolved" semantic conventions. It represents the full
+	// path to the file, with symlinks resolved.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/var/lib/docker/uuid.log'
+	LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the
+// "log.file.name" semantic conventions. It represents the basename of the
+// file.
+func LogFileName(val string) attribute.KeyValue {
+	return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+	return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the
+// "log.file.path" semantic conventions. It represents the full path to the
+// file.
+func LogFilePath(val string) attribute.KeyValue {
+	return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path
+// to the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+	return LogFilePathResolvedKey.String(val)
+}
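+
+// Usage sketch (illustrative, not part of the generated conventions): a
+// file-tailing collector might describe a symlinked log file with both the
+// raw and resolved attributes, using the example values documented above:
+//
+//	attrs := []attribute.KeyValue{
+//		LogFileName("audit.log"),
+//		LogFileNameResolved("uuid.log"),
+//		LogFilePath("/var/log/mysql/audit.log"),
+//		LogFilePathResolved("/var/lib/docker/uuid.log"),
+//	}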
+
+// Describes Database attributes
+const (
+	// PoolNameKey is the attribute Key conforming to the "pool.name" semantic
+	// conventions. It represents the name of the connection pool; unique
+	// within the instrumented application. If the connection pool
+	// implementation doesn't provide a name, the
+	// [db.connection_string](/docs/database/database-spans.md#connection-level-attributes)
+	// should be used instead.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'myDataSource'
+	PoolNameKey = attribute.Key("pool.name")
+
+	// StateKey is the attribute Key conforming to the "state" semantic
+	// conventions. It represents the state of a connection in the pool
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'idle'
+	StateKey = attribute.Key("state")
+)
+
+var (
+	// idle
+	StateIdle = StateKey.String("idle")
+	// used
+	StateUsed = StateKey.String("used")
+)
+
+// PoolName returns an attribute KeyValue conforming to the "pool.name"
+// semantic conventions. It represents the name of the connection pool; unique
+// within the instrumented application. If the connection pool implementation
+// doesn't provide a name, the
+// [db.connection_string](/docs/database/database-spans.md#connection-level-attributes)
+// should be used instead.
+func PoolName(val string) attribute.KeyValue {
+	return PoolNameKey.String(val)
+}
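+
+// Usage sketch (illustrative, not part of the generated conventions): pool
+// metrics are usually reported through go.opentelemetry.io/otel/metric, with
+// the state given as one of the predefined enum values (assuming connections
+// is an Int64UpDownCounter):
+//
+//	connections.Add(ctx, 1, metric.WithAttributes(
+//		PoolName("myDataSource"),
+//		StateIdle, // or StateUsed
+//	))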
+
+// ASP.NET Core attributes
+const (
+	// AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to
+	// the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+	// represents the full type name of the
+	// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+	// implementation that handled the exception.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (if and only if the exception
+	// was handled by this handler.)
+	// Stability: experimental
+	// Examples: 'Contoso.MyHandler'
+	AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type")
+
+	// AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the
+	// "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+	// the rate limiting policy name.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (if the matched endpoint for the
+	// request had a rate-limiting policy.)
+	// Stability: experimental
+	// Examples: 'fixed', 'sliding', 'token'
+	AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy")
+
+	// AspnetcoreRateLimitingResultKey is the attribute Key conforming to the
+	// "aspnetcore.rate_limiting.result" semantic conventions. It represents
+	// the rate-limiting result, showing whether the lease was acquired or
+	// giving the rejection reason.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'acquired', 'request_canceled'
+	AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result")
+
+	// AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the
+	// "aspnetcore.request.is_unhandled" semantic conventions. It represents
+	// the flag indicating whether the request was handled by the application pipeline.
+	//
+	// Type: boolean
+	// RequirementLevel: ConditionallyRequired (if and only if the request was
+	// not handled.)
+	// Stability: experimental
+	// Examples: True
+	AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled")
+
+	// AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the
+	// "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+	// value that indicates whether the matched route is a fallback route.
+	//
+	// Type: boolean
+	// RequirementLevel: ConditionallyRequired (If and only if a route was
+	// successfully matched.)
+	// Stability: experimental
+	// Examples: True
+	AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback")
+)
+
+var (
+	// Lease was acquired
+	AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired")
+	// Lease request was rejected by the endpoint limiter
+	AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter")
+	// Lease request was rejected by the global limiter
+	AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter")
+	// Lease request was canceled
+	AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled")
+)
+
+// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming
+// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+// represents the full type name of the
+// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+// implementation that handled the exception.
+func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue {
+	return AspnetcoreDiagnosticsHandlerTypeKey.String(val)
+}
+
+// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to
+// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+// the rate limiting policy name.
+func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue {
+	return AspnetcoreRateLimitingPolicyKey.String(val)
+}
+
+// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to
+// the "aspnetcore.request.is_unhandled" semantic conventions. It represents
+// the flag indicating whether the request was handled by the application pipeline.
+func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue {
+	return AspnetcoreRequestIsUnhandledKey.Bool(val)
+}
+
+// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to
+// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+// value that indicates whether the matched route is a fallback route.
+func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue {
+	return AspnetcoreRoutingIsFallbackKey.Bool(val)
+}
+
+// SignalR attributes
+const (
+	// SignalrConnectionStatusKey is the attribute Key conforming to the
+	// "signalr.connection.status" semantic conventions. It represents the
+	// SignalR HTTP connection closure status.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'app_shutdown', 'timeout'
+	SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+	// SignalrTransportKey is the attribute Key conforming to the
+	// "signalr.transport" semantic conventions. It represents the [SignalR
+	// transport
+	// type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md)
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'web_sockets', 'long_polling'
+	SignalrTransportKey = attribute.Key("signalr.transport")
+)
+
+var (
+	// The connection was closed normally
+	SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure")
+	// The connection was closed due to a timeout
+	SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout")
+	// The connection was closed because the app is shutting down
+	SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown")
+)
+
+var (
+	// ServerSentEvents protocol
+	SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events")
+	// LongPolling protocol
+	SignalrTransportLongPolling = SignalrTransportKey.String("long_polling")
+	// WebSockets protocol
+	SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets")
+)
+
+// Describes JVM buffer metric attributes.
+const (
+	// JvmBufferPoolNameKey is the attribute Key conforming to the
+	// "jvm.buffer.pool.name" semantic conventions. It represents the name of
+	// the buffer pool.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: experimental
+	// Examples: 'mapped', 'direct'
+	// Note: Pool names are generally obtained via
+	// [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
+	JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
+)
+
+// JvmBufferPoolName returns an attribute KeyValue conforming to the
+// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
+// buffer pool.
+func JvmBufferPoolName(val string) attribute.KeyValue {
+	return JvmBufferPoolNameKey.String(val)
+}
+
+// Describes JVM memory metric attributes.
+const (
+	// JvmMemoryPoolNameKey is the attribute Key conforming to the
+	// "jvm.memory.pool.name" semantic conventions. It represents the name of
+	// the memory pool.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
+	// Note: Pool names are generally obtained via
+	// [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
+	JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
+
+	// JvmMemoryTypeKey is the attribute Key conforming to the
+	// "jvm.memory.type" semantic conventions. It represents the type of
+	// memory.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'heap', 'non_heap'
+	JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
+)
+
+var (
+	// Heap memory
+	JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
+	// Non-heap memory
+	JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
+)
+
+// JvmMemoryPoolName returns an attribute KeyValue conforming to the
+// "jvm.memory.pool.name" semantic conventions. It represents the name of the
+// memory pool.
+func JvmMemoryPoolName(val string) attribute.KeyValue {
+	return JvmMemoryPoolNameKey.String(val)
+}
+
+// Describes System metric attributes
+const (
+	// SystemDeviceKey is the attribute Key conforming to the "system.device"
+	// semantic conventions. It represents the device identifier
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '(identifier)'
+	SystemDeviceKey = attribute.Key("system.device")
+)
+
+// SystemDevice returns an attribute KeyValue conforming to the
+// "system.device" semantic conventions. It represents the device identifier
+func SystemDevice(val string) attribute.KeyValue {
+	return SystemDeviceKey.String(val)
+}
+
+// Describes System CPU metric attributes
+const (
+	// SystemCPULogicalNumberKey is the attribute Key conforming to the
+	// "system.cpu.logical_number" semantic conventions. It represents the
+	// logical CPU number [0..n-1]
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1
+	SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
+
+	// SystemCPUStateKey is the attribute Key conforming to the
+	// "system.cpu.state" semantic conventions. It represents the state of the
+	// CPU
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'idle', 'interrupt'
+	SystemCPUStateKey = attribute.Key("system.cpu.state")
+)
+
+var (
+	// user
+	SystemCPUStateUser = SystemCPUStateKey.String("user")
+	// system
+	SystemCPUStateSystem = SystemCPUStateKey.String("system")
+	// nice
+	SystemCPUStateNice = SystemCPUStateKey.String("nice")
+	// idle
+	SystemCPUStateIdle = SystemCPUStateKey.String("idle")
+	// iowait
+	SystemCPUStateIowait = SystemCPUStateKey.String("iowait")
+	// interrupt
+	SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt")
+	// steal
+	SystemCPUStateSteal = SystemCPUStateKey.String("steal")
+)
+
+// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
+// "system.cpu.logical_number" semantic conventions. It represents the logical
+// CPU number [0..n-1]
+func SystemCPULogicalNumber(val int) attribute.KeyValue {
+	return SystemCPULogicalNumberKey.Int(val)
+}
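+
+// Usage sketch (illustrative, not part of the generated conventions): a
+// per-CPU time metric could attach both attributes (assuming cpuTime is a
+// Float64Counter from go.opentelemetry.io/otel/metric):
+//
+//	cpuTime.Add(ctx, seconds, metric.WithAttributes(
+//		SystemCPULogicalNumber(1),
+//		SystemCPUStateIdle, // predefined enum value for "idle"
+//	))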
+
+// Describes System Memory metric attributes
+const (
+	// SystemMemoryStateKey is the attribute Key conforming to the
+	// "system.memory.state" semantic conventions. It represents the memory
+	// state
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'free', 'cached'
+	SystemMemoryStateKey = attribute.Key("system.memory.state")
+)
+
+var (
+	// used
+	SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
+	// free
+	SystemMemoryStateFree = SystemMemoryStateKey.String("free")
+	// shared
+	SystemMemoryStateShared = SystemMemoryStateKey.String("shared")
+	// buffers
+	SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
+	// cached
+	SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
+)
+
+// Describes System Memory Paging metric attributes
+const (
+	// SystemPagingDirectionKey is the attribute Key conforming to the
+	// "system.paging.direction" semantic conventions. It represents the paging
+	// access direction
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'in'
+	SystemPagingDirectionKey = attribute.Key("system.paging.direction")
+
+	// SystemPagingStateKey is the attribute Key conforming to the
+	// "system.paging.state" semantic conventions. It represents the memory
+	// paging state
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'free'
+	SystemPagingStateKey = attribute.Key("system.paging.state")
+
+	// SystemPagingTypeKey is the attribute Key conforming to the
+	// "system.paging.type" semantic conventions. It represents the memory
+	// paging type
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'minor'
+	SystemPagingTypeKey = attribute.Key("system.paging.type")
+)
+
+var (
+	// in
+	SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
+	// out
+	SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
+)
+
+var (
+	// used
+	SystemPagingStateUsed = SystemPagingStateKey.String("used")
+	// free
+	SystemPagingStateFree = SystemPagingStateKey.String("free")
+)
+
+var (
+	// major
+	SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
+	// minor
+	SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
+)
+
+// Describes Filesystem metric attributes
+const (
+	// SystemFilesystemModeKey is the attribute Key conforming to the
+	// "system.filesystem.mode" semantic conventions. It represents the
+	// filesystem mode
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'rw, ro'
+	SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
+
+	// SystemFilesystemMountpointKey is the attribute Key conforming to the
+	// "system.filesystem.mountpoint" semantic conventions. It represents the
+	// filesystem mount path
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/mnt/data'
+	SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
+
+	// SystemFilesystemStateKey is the attribute Key conforming to the
+	// "system.filesystem.state" semantic conventions. It represents the
+	// filesystem state
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'used'
+	SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
+
+	// SystemFilesystemTypeKey is the attribute Key conforming to the
+	// "system.filesystem.type" semantic conventions. It represents the
+	// filesystem type
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ext4'
+	SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
+)
+
+var (
+	// used
+	SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
+	// free
+	SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
+	// reserved
+	SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
+)
+
+var (
+	// fat32
+	SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
+	// exfat
+	SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
+	// ntfs
+	SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
+	// refs
+	SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
+	// hfsplus
+	SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
+	// ext4
+	SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
+)
+
+// SystemFilesystemMode returns an attribute KeyValue conforming to the
+// "system.filesystem.mode" semantic conventions. It represents the filesystem
+// mode
+func SystemFilesystemMode(val string) attribute.KeyValue {
+	return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
+// the "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+	return SystemFilesystemMountpointKey.String(val)
+}
+
+// Describes Network metric attributes
+const (
+	// SystemNetworkStateKey is the attribute Key conforming to the
+	// "system.network.state" semantic conventions. It represents a stateless
+	// protocol MUST NOT set this attribute
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'close_wait'
+	SystemNetworkStateKey = attribute.Key("system.network.state")
+)
+
+var (
+	// close
+	SystemNetworkStateClose = SystemNetworkStateKey.String("close")
+	// close_wait
+	SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
+	// closing
+	SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
+	// delete
+	SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
+	// established
+	SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
+	// fin_wait_1
+	SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
+	// fin_wait_2
+	SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
+	// last_ack
+	SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
+	// listen
+	SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
+	// syn_recv
+	SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
+	// syn_sent
+	SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
+	// time_wait
+	SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
+)
+
+// Describes System Process metric attributes
+const (
+	// SystemProcessesStatusKey is the attribute Key conforming to the
+	// "system.processes.status" semantic conventions. It represents the
+	// process state, e.g., [Linux Process State
+	// Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'running'
+	SystemProcessesStatusKey = attribute.Key("system.processes.status")
+)
+
+var (
+	// running
+	SystemProcessesStatusRunning = SystemProcessesStatusKey.String("running")
+	// sleeping
+	SystemProcessesStatusSleeping = SystemProcessesStatusKey.String("sleeping")
+	// stopped
+	SystemProcessesStatusStopped = SystemProcessesStatusKey.String("stopped")
+	// defunct
+	SystemProcessesStatusDefunct = SystemProcessesStatusKey.String("defunct")
+)
+
+// These attributes may be used to describe the client in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+	// ClientAddressKey is the attribute Key conforming to the "client.address"
+	// semantic conventions. It represents the client address - domain name if
+	// available without reverse DNS lookup; otherwise, IP address or Unix
+	// domain socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the server side, and when communicating through
+	// an intermediary, `client.address` SHOULD represent the client address
+	// behind any intermediaries, for example proxies, if it's available.
+	ClientAddressKey = attribute.Key("client.address")
+
+	// ClientPortKey is the attribute Key conforming to the "client.port"
+	// semantic conventions. It represents the client port number.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	// Note: When observed from the server side, and when communicating through
+	// an intermediary, `client.port` SHOULD represent the client port behind
+	// any intermediaries,  for example proxies, if it's available.
+	ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the
+// "client.address" semantic conventions. It represents the client address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func ClientAddress(val string) attribute.KeyValue {
+	return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+	return ClientPortKey.Int(val)
+}
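+
+// exampleClientPeer is an illustrative sketch (not part of the generated
+// conventions) combining the two helpers above as a server-side
+// instrumentation might; the values are taken from the Examples lines.
+func exampleClientPeer() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		// Per the notes above, this is the address behind any intermediaries
+		// when observed from the server side.
+		ClientAddress("10.1.2.80"),
+		ClientPort(65123),
+	}
+}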
+
+// The attributes used to describe telemetry in the context of databases.
+const (
+	// DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+	// "db.cassandra.consistency_level" semantic conventions. It represents the
+	// consistency level of the query. Based on consistency values from
+	// [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+	// DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.dc" semantic conventions. It represents the
+	// data center of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'us-west-2'
+	DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+
+	// DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+	// of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+	DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+	// DBCassandraIdempotenceKey is the attribute Key conforming to the
+	// "db.cassandra.idempotence" semantic conventions. It represents whether
+	// or not the query is idempotent.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+	// DBCassandraPageSizeKey is the attribute Key conforming to the
+	// "db.cassandra.page_size" semantic conventions. It represents the fetch
+	// size used for paging, i.e. how many rows will be returned at once.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 5000
+	DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+	// DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+	// to the "db.cassandra.speculative_execution_count" semantic conventions.
+	// It represents the number of times a query was speculatively executed.
+	// Not set or `0` if the query was not executed speculatively.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 0, 2
+	DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+	// DBCassandraTableKey is the attribute Key conforming to the
+	// "db.cassandra.table" semantic conventions. It represents the name of the
+	// primary Cassandra table that the operation is acting upon, including the
+	// keyspace name (if applicable).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'mytable'
+	// Note: This mirrors the db.sql.table attribute but references cassandra
+	// rather than sql. It is not recommended to attempt any client-side
+	// parsing of `db.statement` just to get this property, but it should be
+	// set if it is provided by the library being instrumented. If the
+	// operation is acting upon an anonymous table, or more than one table,
+	// this value MUST NOT be set.
+	DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+	// DBConnectionStringKey is the attribute Key conforming to the
+	// "db.connection_string" semantic conventions. It represents the
+	// connection string used to connect to the database. It is recommended to
+	// remove embedded credentials.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+	DBConnectionStringKey = attribute.Key("db.connection_string")
+
+	// DBCosmosDBClientIDKey is the attribute Key conforming to the
+	// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+	// Cosmos client instance id.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
+	DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
+
+	// DBCosmosDBConnectionModeKey is the attribute Key conforming to the
+	// "db.cosmosdb.connection_mode" semantic conventions. It represents the
+	// cosmos client connection mode.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
+
+	// DBCosmosDBContainerKey is the attribute Key conforming to the
+	// "db.cosmosdb.container" semantic conventions. It represents the cosmos
+	// DB container name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'anystring'
+	DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container")
+
+	// DBCosmosDBOperationTypeKey is the attribute Key conforming to the
+	// "db.cosmosdb.operation_type" semantic conventions. It represents the
+	// cosmosDB Operation Type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
+
+	// DBCosmosDBRequestChargeKey is the attribute Key conforming to the
+	// "db.cosmosdb.request_charge" semantic conventions. It represents the RU
+	// consumed for that operation.
+	//
+	// Type: double
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 46.18, 1.0
+	DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+
+	// DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+	// "db.cosmosdb.request_content_length" semantic conventions. It represents
+	// the request payload size in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+	// DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+	// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
+	// DB status code.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 200, 201
+	DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+	// DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+	// "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+	// cosmos DB sub status code.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1000, 1002
+	DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+
+	// DBElasticsearchClusterNameKey is the attribute Key conforming to the
+	// "db.elasticsearch.cluster.name" semantic conventions. It represents the
+	// identifier of an Elasticsearch cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
+	DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
+
+	// DBElasticsearchNodeNameKey is the attribute Key conforming to the
+	// "db.elasticsearch.node.name" semantic conventions. It represents the
+	// human-readable identifier of the node/instance to which a
+	// request was routed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'instance-0000000001'
+	DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
+
+	// DBInstanceIDKey is the attribute Key conforming to the "db.instance.id"
+	// semantic conventions. It represents an identifier (address, unique name,
+	// or any other identifier) of the database instance that is executing
+	// queries or mutations on the current connection. This is useful in cases
+	// where the database is running in a clustered environment and the
+	// instrumentation is able to record the node executing the query. The
+	// client may obtain this value in databases like MySQL using queries like
+	// `select @@hostname`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'mysql-e26b99z.example.com'
+	DBInstanceIDKey = attribute.Key("db.instance.id")
+
+	// DBJDBCDriverClassnameKey is the attribute Key conforming to the
+	// "db.jdbc.driver_classname" semantic conventions. It represents the
+	// fully-qualified class name of the [Java Database Connectivity
+	// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+	// driver used to connect.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'org.postgresql.Driver',
+	// 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+	DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+	// DBMongoDBCollectionKey is the attribute Key conforming to the
+	// "db.mongodb.collection" semantic conventions. It represents the MongoDB
+	// collection being accessed within the database stated in `db.name`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'customers', 'products'
+	DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+
+	// DBMSSQLInstanceNameKey is the attribute Key conforming to the
+	// "db.mssql.instance_name" semantic conventions. It represents the
+	// Microsoft SQL Server [instance
+	// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+	// connecting to. This name is used to determine the port of a named
+	// instance.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MSSQLSERVER'
+	// Note: If setting a `db.mssql.instance_name`, `server.port` is no longer
+	// required (but still recommended if non-standard).
+	DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+
+	// DBNameKey is the attribute Key conforming to the "db.name" semantic
+	// conventions. It represents the name of the database being accessed. For
+	// commands that switch the database, this should be set to the target
+	// database (even if the command fails).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'customers', 'main'
+	// Note: In some SQL databases, the database name to be used is called
+	// "schema name". In case there are multiple layers that could be
+	// considered for database name (e.g. Oracle instance name and schema
+	// name), the database name to be used is the more specific layer (e.g.
+	// Oracle schema name).
+	DBNameKey = attribute.Key("db.name")
+
+	// DBOperationKey is the attribute Key conforming to the "db.operation"
+	// semantic conventions. It represents the name of the operation being
+	// executed, e.g. the [MongoDB command
+	// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+	// such as `findAndModify`, or the SQL keyword.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'findAndModify', 'HMSET', 'SELECT'
+	// Note: When setting this to an SQL keyword, it is not recommended to
+	// attempt any client-side parsing of `db.statement` just to get this
+	// property, but it should be set if the operation name is provided by the
+	// library being instrumented. If the SQL statement has an ambiguous
+	// operation, or performs more than one operation, this value may be
+	// omitted.
+	DBOperationKey = attribute.Key("db.operation")
+
+	// DBRedisDBIndexKey is the attribute Key conforming to the
+	// "db.redis.database_index" semantic conventions. It represents the index
+	// of the database being accessed as used in the [`SELECT`
+	// command](https://redis.io/commands/select), provided as an integer. To
+	// be used instead of the generic `db.name` attribute.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 0, 1, 15
+	DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+
+	// DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
+	// semantic conventions. It represents the name of the primary table that
+	// the operation is acting upon, including the database name (if
+	// applicable).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'public.users', 'customers'
+	// Note: It is not recommended to attempt any client-side parsing of
+	// `db.statement` just to get this property, but it should be set if it is
+	// provided by the library being instrumented. If the operation is acting
+	// upon an anonymous table, or more than one table, this value MUST NOT be
+	// set.
+	DBSQLTableKey = attribute.Key("db.sql.table")
+
+	// DBStatementKey is the attribute Key conforming to the "db.statement"
+	// semantic conventions. It represents the database statement being
+	// executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+	DBStatementKey = attribute.Key("db.statement")
+
+	// DBSystemKey is the attribute Key conforming to the "db.system" semantic
+	// conventions. It represents an identifier for the database management
+	// system (DBMS) product being used. See below for a list of well-known
+	// identifiers.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBSystemKey = attribute.Key("db.system")
+
+	// DBUserKey is the attribute Key conforming to the "db.user" semantic
+	// conventions. It represents the username for accessing the database.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'readonly_user', 'reporting_user'
+	DBUserKey = attribute.Key("db.user")
+)
+
+var (
+	// all
+	DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+	// each_quorum
+	DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+	// quorum
+	DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+	// local_quorum
+	DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+	// one
+	DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+	// two
+	DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+	// three
+	DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+	// local_one
+	DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+	// any
+	DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+	// serial
+	DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+	// local_serial
+	DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+var (
+	// Gateway (HTTP) connections mode
+	DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+	// Direct connection
+	DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+var (
+	// invalid
+	DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+	// create
+	DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+	// patch
+	DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+	// read
+	DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+	// read_feed
+	DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+	// delete
+	DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+	// replace
+	DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+	// execute
+	DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+	// query
+	DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+	// head
+	DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+	// head_feed
+	DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+	// upsert
+	DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+	// batch
+	DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+	// query_plan
+	DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+	// execute_javascript
+	DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
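+
+// exampleCosmosDBAttributes is an illustrative sketch (not part of the
+// generated conventions) combining the Cosmos DB keys above for a single
+// point-read operation; the values echo the Examples lines.
+func exampleCosmosDBAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		DBCosmosDBConnectionModeDirect,
+		DBCosmosDBOperationTypeRead,
+		DBCosmosDBContainer("anystring"),
+		DBCosmosDBStatusCode(200),
+		DBCosmosDBRequestCharge(46.18), // request units consumed
+	}
+}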
+
+var (
+	// Some other SQL database. Fallback only. See notes
+	DBSystemOtherSQL = DBSystemKey.String("other_sql")
+	// Microsoft SQL Server
+	DBSystemMSSQL = DBSystemKey.String("mssql")
+	// Microsoft SQL Server Compact
+	DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
+	// MySQL
+	DBSystemMySQL = DBSystemKey.String("mysql")
+	// Oracle Database
+	DBSystemOracle = DBSystemKey.String("oracle")
+	// IBM DB2
+	DBSystemDB2 = DBSystemKey.String("db2")
+	// PostgreSQL
+	DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+	// Amazon Redshift
+	DBSystemRedshift = DBSystemKey.String("redshift")
+	// Apache Hive
+	DBSystemHive = DBSystemKey.String("hive")
+	// Cloudscape
+	DBSystemCloudscape = DBSystemKey.String("cloudscape")
+	// HyperSQL DataBase
+	DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+	// Progress Database
+	DBSystemProgress = DBSystemKey.String("progress")
+	// SAP MaxDB
+	DBSystemMaxDB = DBSystemKey.String("maxdb")
+	// SAP HANA
+	DBSystemHanaDB = DBSystemKey.String("hanadb")
+	// Ingres
+	DBSystemIngres = DBSystemKey.String("ingres")
+	// FirstSQL
+	DBSystemFirstSQL = DBSystemKey.String("firstsql")
+	// EnterpriseDB
+	DBSystemEDB = DBSystemKey.String("edb")
+	// InterSystems Caché
+	DBSystemCache = DBSystemKey.String("cache")
+	// Adabas (Adaptable Database System)
+	DBSystemAdabas = DBSystemKey.String("adabas")
+	// Firebird
+	DBSystemFirebird = DBSystemKey.String("firebird")
+	// Apache Derby
+	DBSystemDerby = DBSystemKey.String("derby")
+	// FileMaker
+	DBSystemFilemaker = DBSystemKey.String("filemaker")
+	// Informix
+	DBSystemInformix = DBSystemKey.String("informix")
+	// InstantDB
+	DBSystemInstantDB = DBSystemKey.String("instantdb")
+	// InterBase
+	DBSystemInterbase = DBSystemKey.String("interbase")
+	// MariaDB
+	DBSystemMariaDB = DBSystemKey.String("mariadb")
+	// Netezza
+	DBSystemNetezza = DBSystemKey.String("netezza")
+	// Pervasive PSQL
+	DBSystemPervasive = DBSystemKey.String("pervasive")
+	// PointBase
+	DBSystemPointbase = DBSystemKey.String("pointbase")
+	// SQLite
+	DBSystemSqlite = DBSystemKey.String("sqlite")
+	// Sybase
+	DBSystemSybase = DBSystemKey.String("sybase")
+	// Teradata
+	DBSystemTeradata = DBSystemKey.String("teradata")
+	// Vertica
+	DBSystemVertica = DBSystemKey.String("vertica")
+	// H2
+	DBSystemH2 = DBSystemKey.String("h2")
+	// ColdFusion IMQ
+	DBSystemColdfusion = DBSystemKey.String("coldfusion")
+	// Apache Cassandra
+	DBSystemCassandra = DBSystemKey.String("cassandra")
+	// Apache HBase
+	DBSystemHBase = DBSystemKey.String("hbase")
+	// MongoDB
+	DBSystemMongoDB = DBSystemKey.String("mongodb")
+	// Redis
+	DBSystemRedis = DBSystemKey.String("redis")
+	// Couchbase
+	DBSystemCouchbase = DBSystemKey.String("couchbase")
+	// CouchDB
+	DBSystemCouchDB = DBSystemKey.String("couchdb")
+	// Microsoft Azure Cosmos DB
+	DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+	// Amazon DynamoDB
+	DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+	// Neo4j
+	DBSystemNeo4j = DBSystemKey.String("neo4j")
+	// Apache Geode
+	DBSystemGeode = DBSystemKey.String("geode")
+	// Elasticsearch
+	DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+	// Memcached
+	DBSystemMemcached = DBSystemKey.String("memcached")
+	// CockroachDB
+	DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+	// OpenSearch
+	DBSystemOpensearch = DBSystemKey.String("opensearch")
+	// ClickHouse
+	DBSystemClickhouse = DBSystemKey.String("clickhouse")
+	// Cloud Spanner
+	DBSystemSpanner = DBSystemKey.String("spanner")
+	// Trino
+	DBSystemTrino = DBSystemKey.String("trino")
+)
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+	return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+	return DBCassandraCoordinatorIDKey.String(val)
+}
+
+	// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+	// "db.cassandra.idempotence" semantic conventions. It represents whether
+	// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+	return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+	return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+	return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary Cassandra table that the operation is acting upon, including the
+// keyspace name (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+	return DBCassandraTableKey.String(val)
+}
+
+// DBConnectionString returns an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+	return DBConnectionStringKey.String(val)
+}
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+	return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBContainer returns an attribute KeyValue conforming to the
+// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
+// container name.
+func DBCosmosDBContainer(val string) attribute.KeyValue {
+	return DBCosmosDBContainerKey.String(val)
+}
+
+	// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+	// "db.cosmosdb.request_charge" semantic conventions. It represents the RU
+	// consumed for that operation.
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+	return DBCosmosDBRequestChargeKey.Float64(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+	// represents the request payload size in bytes.
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+	return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+	return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+	return DBCosmosDBSubStatusCodeKey.Int(val)
+}
+
+// DBElasticsearchClusterName returns an attribute KeyValue conforming to
+// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
+	// identifier of an Elasticsearch cluster.
+func DBElasticsearchClusterName(val string) attribute.KeyValue {
+	return DBElasticsearchClusterNameKey.String(val)
+}
+
+// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "db.elasticsearch.node.name" semantic conventions. It represents the
+	// human-readable identifier of the node/instance to which a
+// request was routed.
+func DBElasticsearchNodeName(val string) attribute.KeyValue {
+	return DBElasticsearchNodeNameKey.String(val)
+}
+
+// DBInstanceID returns an attribute KeyValue conforming to the
+// "db.instance.id" semantic conventions. It represents an identifier (address,
+// unique name, or any other identifier) of the database instance that is
+// executing queries or mutations on the current connection. This is useful in
+// cases where the database is running in a clustered environment and the
+// instrumentation is able to record the node executing the query. The client
+// may obtain this value in databases like MySQL using queries like `select
+// @@hostname`.
+func DBInstanceID(val string) attribute.KeyValue {
+	return DBInstanceIDKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+	return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBMongoDBCollection returns an attribute KeyValue conforming to the
+// "db.mongodb.collection" semantic conventions. It represents the MongoDB
+// collection being accessed within the database stated in `db.name`.
+func DBMongoDBCollection(val string) attribute.KeyValue {
+	return DBMongoDBCollectionKey.String(val)
+}
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// connecting to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+	return DBMSSQLInstanceNameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+	// conventions. It represents the name of
+// the database being accessed. For commands that switch the database, this
+// should be set to the target database (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+	return DBNameKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+	return DBOperationKey.String(val)
+}
+
+// DBRedisDBIndex returns an attribute KeyValue conforming to the
+// "db.redis.database_index" semantic conventions. It represents the index of
+// the database being accessed as used in the [`SELECT`
+// command](https://redis.io/commands/select), provided as an integer. To be
+// used instead of the generic `db.name` attribute.
+func DBRedisDBIndex(val int) attribute.KeyValue {
+	return DBRedisDBIndexKey.Int(val)
+}
+
+// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
+// semantic conventions. It represents the name of the primary table that the
+// operation is acting upon, including the database name (if applicable).
+func DBSQLTable(val string) attribute.KeyValue {
+	return DBSQLTableKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+	return DBStatementKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+	return DBUserKey.String(val)
+}
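+
+// exampleSQLQueryAttributes is an illustrative sketch (not part of the
+// generated conventions) of a plausible attribute set for a SQL client span
+// built from the helpers above; the values echo the Examples lines. Per the
+// notes above, db.operation and db.sql.table SHOULD NOT be derived by
+// client-side parsing of db.statement.
+func exampleSQLQueryAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		DBSystemPostgreSQL, // enum member from the db.system list above
+		DBName("customers"),
+		DBUser("readonly_user"),
+		DBOperation("SELECT"),
+		DBSQLTable("public.users"),
+		DBStatement("SELECT * FROM public.users"),
+	}
+}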
+
+// Describes deprecated HTTP attributes.
+const (
+	// HTTPFlavorKey is the attribute Key conforming to the "http.flavor"
+	// semantic conventions.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Deprecated: use `network.protocol.name` instead.
+	HTTPFlavorKey = attribute.Key("http.flavor")
+
+	// HTTPMethodKey is the attribute Key conforming to the "http.method"
+	// semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'GET', 'POST', 'HEAD'
+	// Deprecated: use `http.request.method` instead.
+	HTTPMethodKey = attribute.Key("http.method")
+
+	// HTTPRequestContentLengthKey is the attribute Key conforming to the
+	// "http.request_content_length" semantic conventions.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 3495
+	// Deprecated: use `http.request.header.content-length` instead.
+	HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+	// HTTPResponseContentLengthKey is the attribute Key conforming to the
+	// "http.response_content_length" semantic conventions.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 3495
+	// Deprecated: use `http.response.header.content-length` instead.
+	HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+
+	// HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+	// semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'http', 'https'
+	// Deprecated: use `url.scheme` instead.
+	HTTPSchemeKey = attribute.Key("http.scheme")
+
+	// HTTPStatusCodeKey is the attribute Key conforming to the
+	// "http.status_code" semantic conventions.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 200
+	// Deprecated: use `http.response.status_code` instead.
+	HTTPStatusCodeKey = attribute.Key("http.status_code")
+
+	// HTTPTargetKey is the attribute Key conforming to the "http.target"
+	// semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '/search?q=OpenTelemetry#SemConv'
+	// Deprecated: use `url.path` and `url.query` instead.
+	HTTPTargetKey = attribute.Key("http.target")
+
+	// HTTPURLKey is the attribute Key conforming to the "http.url" semantic
+	// conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+	// Deprecated: use `url.full` instead.
+	HTTPURLKey = attribute.Key("http.url")
+
+	// HTTPUserAgentKey is the attribute Key conforming to the
+	// "http.user_agent" semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
+	// iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+	// Version/14.1.2 Mobile/15E148 Safari/604.1'
+	// Deprecated: use `user_agent.original` instead.
+	HTTPUserAgentKey = attribute.Key("http.user_agent")
+)
+
+var (
+	// HTTP/1.0
+	//
+	// Deprecated: use `network.protocol.name` instead.
+	HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
+	// HTTP/1.1
+	//
+	// Deprecated: use `network.protocol.name` instead.
+	HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
+	// HTTP/2
+	//
+	// Deprecated: use `network.protocol.name` instead.
+	HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
+	// HTTP/3
+	//
+	// Deprecated: use `network.protocol.name` instead.
+	HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0")
+	// SPDY protocol
+	//
+	// Deprecated: use `network.protocol.name` instead.
+	HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
+	// QUIC protocol
+	//
+	// Deprecated: use `network.protocol.name` instead.
+	HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions.
+//
+// Deprecated: use `http.request.method` instead.
+func HTTPMethod(val string) attribute.KeyValue {
+	return HTTPMethodKey.String(val)
+}
+
+// HTTPRequestContentLength returns an attribute KeyValue conforming to the
+// "http.request_content_length" semantic conventions.
+//
+// Deprecated: use `http.request.header.content-length` instead.
+func HTTPRequestContentLength(val int) attribute.KeyValue {
+	return HTTPRequestContentLengthKey.Int(val)
+}
+
+// HTTPResponseContentLength returns an attribute KeyValue conforming to the
+// "http.response_content_length" semantic conventions.
+//
+// Deprecated: use `http.response.header.content-length` instead.
+func HTTPResponseContentLength(val int) attribute.KeyValue {
+	return HTTPResponseContentLengthKey.Int(val)
+}
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions.
+//
+// Deprecated: use `url.scheme` instead.
+func HTTPScheme(val string) attribute.KeyValue {
+	return HTTPSchemeKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions.
+//
+// Deprecated: use `http.response.status_code` instead.
+func HTTPStatusCode(val int) attribute.KeyValue {
+	return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions.
+//
+// Deprecated: use `url.path` and `url.query` instead.
+func HTTPTarget(val string) attribute.KeyValue {
+	return HTTPTargetKey.String(val)
+}
+
+// HTTPURL returns an attribute KeyValue conforming to the "http.url"
+// semantic conventions.
+//
+// Deprecated: use `url.full` instead.
+func HTTPURL(val string) attribute.KeyValue {
+	return HTTPURLKey.String(val)
+}
+
+// HTTPUserAgent returns an attribute KeyValue conforming to the
+// "http.user_agent" semantic conventions.
+//
+// Deprecated: use `user_agent.original` instead.
+func HTTPUserAgent(val string) attribute.KeyValue {
+	return HTTPUserAgentKey.String(val)
+}
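+
+// exampleMigratedHTTPAttributes is an illustrative sketch (not part of the
+// generated conventions) of the replacements named in the Deprecated notices
+// above; url.full is assumed to be defined in this package's URL attribute
+// group, so it is spelled out via attribute.String here.
+func exampleMigratedHTTPAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		// http.method      -> http.request.method
+		HTTPRequestMethodGet,
+		// http.status_code -> http.response.status_code
+		HTTPResponseStatusCodeKey.Int(200),
+		// http.url         -> url.full
+		attribute.String("url.full", "https://www.foo.bar/search?q=OpenTelemetry#SemConv"),
+	}
+}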
+
+// These attributes may be used for any network related operation.
+const (
+	// NetHostNameKey is the attribute Key conforming to the "net.host.name"
+	// semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'example.com'
+	// Deprecated: use `server.address`.
+	NetHostNameKey = attribute.Key("net.host.name")
+
+	// NetHostPortKey is the attribute Key conforming to the "net.host.port"
+	// semantic conventions.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 8080
+	// Deprecated: use `server.port`.
+	NetHostPortKey = attribute.Key("net.host.port")
+
+	// NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
+	// semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'example.com'
+	// Deprecated: use `server.address` on client spans and `client.address` on
+	// server spans.
+	NetPeerNameKey = attribute.Key("net.peer.name")
+
+	// NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
+	// semantic conventions.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 8080
+	// Deprecated: use `server.port` on client spans and `client.port` on
+	// server spans.
+	NetPeerPortKey = attribute.Key("net.peer.port")
+
+	// NetProtocolNameKey is the attribute Key conforming to the
+	// "net.protocol.name" semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'amqp', 'http', 'mqtt'
+	// Deprecated: use `network.protocol.name`.
+	NetProtocolNameKey = attribute.Key("net.protocol.name")
+
+	// NetProtocolVersionKey is the attribute Key conforming to the
+	// "net.protocol.version" semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '3.1.1'
+	// Deprecated: use `network.protocol.version`.
+	NetProtocolVersionKey = attribute.Key("net.protocol.version")
+
+	// NetSockFamilyKey is the attribute Key conforming to the
+	// "net.sock.family" semantic conventions.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Deprecated: use `network.transport` and `network.type`.
+	NetSockFamilyKey = attribute.Key("net.sock.family")
+
+	// NetSockHostAddrKey is the attribute Key conforming to the
+	// "net.sock.host.addr" semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '/var/my.sock'
+	// Deprecated: use `network.local.address`.
+	NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
+
+	// NetSockHostPortKey is the attribute Key conforming to the
+	// "net.sock.host.port" semantic conventions.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 8080
+	// Deprecated: use `network.local.port`.
+	NetSockHostPortKey = attribute.Key("net.sock.host.port")
+
+	// NetSockPeerAddrKey is the attribute Key conforming to the
+	// "net.sock.peer.addr" semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '192.168.0.1'
+	// Deprecated: use `network.peer.address`.
+	NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
+
+	// NetSockPeerNameKey is the attribute Key conforming to the
+	// "net.sock.peer.name" semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '/var/my.sock'
+	// Deprecated: no replacement at this time.
+	NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
+
+	// NetSockPeerPortKey is the attribute Key conforming to the
+	// "net.sock.peer.port" semantic conventions.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 65531
+	// Deprecated: use `network.peer.port`.
+	NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
+
+	// NetTransportKey is the attribute Key conforming to the "net.transport"
+	// semantic conventions.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Deprecated: use `network.transport`.
+	NetTransportKey = attribute.Key("net.transport")
+)
+
+var (
+	// IPv4 address
+	//
+	// Deprecated: use `network.transport` and `network.type`.
+	NetSockFamilyInet = NetSockFamilyKey.String("inet")
+	// IPv6 address
+	//
+	// Deprecated: use `network.transport` and `network.type`.
+	NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
+	// Unix domain socket path
+	//
+	// Deprecated: use `network.transport` and `network.type`.
+	NetSockFamilyUnix = NetSockFamilyKey.String("unix")
+)
+
+var (
+	// ip_tcp
+	//
+	// Deprecated: use `network.transport`.
+	NetTransportTCP = NetTransportKey.String("ip_tcp")
+	// ip_udp
+	//
+	// Deprecated: use `network.transport`.
+	NetTransportUDP = NetTransportKey.String("ip_udp")
+	// Named or anonymous pipe
+	//
+	// Deprecated: use `network.transport`.
+	NetTransportPipe = NetTransportKey.String("pipe")
+	// In-process communication
+	//
+	// Deprecated: use `network.transport`.
+	NetTransportInProc = NetTransportKey.String("inproc")
+	// Something else (non IP-based)
+	//
+	// Deprecated: use `network.transport`.
+	NetTransportOther = NetTransportKey.String("other")
+)
+
+// NetHostName returns an attribute KeyValue conforming to the
+// "net.host.name" semantic conventions.
+//
+// Deprecated: use `server.address`.
+func NetHostName(val string) attribute.KeyValue {
+	return NetHostNameKey.String(val)
+}
+
+// NetHostPort returns an attribute KeyValue conforming to the
+// "net.host.port" semantic conventions.
+//
+// Deprecated: use `server.port`.
+func NetHostPort(val int) attribute.KeyValue {
+	return NetHostPortKey.Int(val)
+}
+
+// NetPeerName returns an attribute KeyValue conforming to the
+// "net.peer.name" semantic conventions.
+//
+// Deprecated: use `server.address` on client spans and `client.address` on
+// server spans.
+func NetPeerName(val string) attribute.KeyValue {
+	return NetPeerNameKey.String(val)
+}
+
+// NetPeerPort returns an attribute KeyValue conforming to the
+// "net.peer.port" semantic conventions.
+//
+// Deprecated: use `server.port` on client spans and `client.port` on server
+// spans.
+func NetPeerPort(val int) attribute.KeyValue {
+	return NetPeerPortKey.Int(val)
+}
+
+// NetProtocolName returns an attribute KeyValue conforming to the
+// "net.protocol.name" semantic conventions.
+//
+// Deprecated: use `network.protocol.name`.
+func NetProtocolName(val string) attribute.KeyValue {
+	return NetProtocolNameKey.String(val)
+}
+
+// NetProtocolVersion returns an attribute KeyValue conforming to the
+// "net.protocol.version" semantic conventions.
+//
+// Deprecated: use `network.protocol.version`.
+func NetProtocolVersion(val string) attribute.KeyValue {
+	return NetProtocolVersionKey.String(val)
+}
+
+// NetSockHostAddr returns an attribute KeyValue conforming to the
+// "net.sock.host.addr" semantic conventions.
+//
+// Deprecated: use `network.local.address`.
+func NetSockHostAddr(val string) attribute.KeyValue {
+	return NetSockHostAddrKey.String(val)
+}
+
+// NetSockHostPort returns an attribute KeyValue conforming to the
+// "net.sock.host.port" semantic conventions.
+//
+// Deprecated: use `network.local.port`.
+func NetSockHostPort(val int) attribute.KeyValue {
+	return NetSockHostPortKey.Int(val)
+}
+
+// NetSockPeerAddr returns an attribute KeyValue conforming to the
+// "net.sock.peer.addr" semantic conventions.
+//
+// Deprecated: use `network.peer.address`.
+func NetSockPeerAddr(val string) attribute.KeyValue {
+	return NetSockPeerAddrKey.String(val)
+}
+
+// NetSockPeerName returns an attribute KeyValue conforming to the
+// "net.sock.peer.name" semantic conventions.
+//
+// Deprecated: no replacement at this time.
+func NetSockPeerName(val string) attribute.KeyValue {
+	return NetSockPeerNameKey.String(val)
+}
+
+// NetSockPeerPort returns an attribute KeyValue conforming to the
+// "net.sock.peer.port" semantic conventions.
+//
+// Deprecated: use `network.peer.port`.
+func NetSockPeerPort(val int) attribute.KeyValue {
+	return NetSockPeerPortKey.Int(val)
+}
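+
+// exampleMigratedNetAttributes is an illustrative sketch (not part of the
+// generated conventions) of the client-span replacements named in the
+// Deprecated notices above; the server.* and network.* keys are assumed to
+// be defined in this package's current attribute groups, so they are spelled
+// out via the attribute package here.
+func exampleMigratedNetAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		// net.peer.name      -> server.address (on client spans)
+		attribute.String("server.address", "example.com"),
+		// net.peer.port      -> server.port (on client spans)
+		attribute.Int("server.port", 8080),
+		// net.sock.peer.addr -> network.peer.address
+		attribute.String("network.peer.address", "192.168.0.1"),
+	}
+}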
+
+// These attributes may be used to describe the receiver of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+	// DestinationAddressKey is the attribute Key conforming to the
+	// "destination.address" semantic conventions. It represents the
+	// destination address - domain name if available without reverse DNS
+	// lookup; otherwise, IP address or Unix domain socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the source side, and when communicating through
+	// an intermediary, `destination.address` SHOULD represent the destination
+	// address behind any intermediaries, for example proxies, if it's
+	// available.
+	DestinationAddressKey = attribute.Key("destination.address")
+
+	// DestinationPortKey is the attribute Key conforming to the
+	// "destination.port" semantic conventions. It represents the destination
+	// port number.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3389, 2888
+	DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the destination
+// address - domain name if available without reverse DNS lookup; otherwise, IP
+// address or Unix domain socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+	return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the destination port
+	// number.
+func DestinationPort(val int) attribute.KeyValue {
+	return DestinationPortKey.Int(val)
+}
+
+// These attributes may be used for any disk related operation.
+const (
+	// DiskIoDirectionKey is the attribute Key conforming to the
+	// "disk.io.direction" semantic conventions. It represents the disk IO
+	// operation direction.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'read'
+	DiskIoDirectionKey = attribute.Key("disk.io.direction")
+)
+
+var (
+	// read
+	DiskIoDirectionRead = DiskIoDirectionKey.String("read")
+	// write
+	DiskIoDirectionWrite = DiskIoDirectionKey.String("write")
+)
+
+// The shared attributes used to report an error.
+const (
+	// ErrorTypeKey is the attribute Key conforming to the "error.type"
+	// semantic conventions. It describes a class of error the
+	// operation ended with.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'timeout', 'java.net.UnknownHostException',
+	// 'server_certificate_invalid', '500'
+	// Note: The `error.type` SHOULD be predictable and SHOULD have low
+	// cardinality.
+	// Instrumentations SHOULD document the list of errors they report.
+	//
+	// The cardinality of `error.type` within one instrumentation library
+	// SHOULD be low.
+	// Telemetry consumers that aggregate data from multiple instrumentation
+	// libraries and applications
+	// should be prepared for `error.type` to have high cardinality at query
+	// time when no
+	// additional filters are applied.
+	//
+	// If the operation has completed successfully, instrumentations SHOULD NOT
+	// set `error.type`.
+	//
+	// If a specific domain defines its own set of error identifiers (such as
+	// HTTP or gRPC status codes),
+	// it's RECOMMENDED to:
+	//
+	// * Use a domain-specific attribute
+	// * Set `error.type` to capture all errors, regardless of whether they are
+	// defined within the domain-specific set or not.
+	ErrorTypeKey = attribute.Key("error.type")
+)
+
+var (
+	// A fallback error value to be used when the instrumentation doesn't define a custom value
+	ErrorTypeOther = ErrorTypeKey.String("_OTHER")
+)
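+
+// exampleErrorType is an illustrative sketch (not part of the generated
+// conventions) of the guidance above for a domain with its own error
+// identifiers (HTTP): keep the domain-specific attribute and mirror the same
+// low-cardinality identifier into error.type, falling back to ErrorTypeOther
+// when nothing better is known.
+func exampleErrorType() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		HTTPResponseStatusCodeKey.Int(500),
+		ErrorTypeKey.String("500"),
+	}
+}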
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+	// ExceptionEscapedKey is the attribute Key conforming to the
+	// "exception.escaped" semantic conventions. It SHOULD be set to true if
+	// the exception event is recorded at a point where it is known that the
+	// exception is escaping the scope of the span.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: An exception is considered to have escaped (or left) the scope of
+	// a span,
+	// if that span is ended while the exception is still logically "in
+	// flight".
+	// This may be actually "in flight" in some languages (e.g. if the
+	// exception
+	// is passed to a Context manager's `__exit__` method in Python) but will
+	// usually be caught at the point of recording the exception in most
+	// languages.
+	//
+	// It is usually not possible to determine at the point where an exception
+	// is thrown
+	// whether it will escape the scope of a span.
+	// However, it is trivial to know that an exception
+	// will escape, if one checks for an active exception just before ending
+	// the span,
+	// as done in the [example for recording span
+	// exceptions](#recording-an-exception).
+	//
+	// It follows that an exception may still escape the scope of the span
+	// even if the `exception.escaped` attribute was not set or set to false,
+	// since the event might have been recorded at a time where it was not
+	// clear whether the exception will escape.
+	ExceptionEscapedKey = attribute.Key("exception.escaped")
+
+	// ExceptionMessageKey is the attribute Key conforming to the
+	// "exception.message" semantic conventions. It represents the exception
+	// message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Division by zero', "Can't convert 'int' object to str
+	// implicitly"
+	ExceptionMessageKey = attribute.Key("exception.message")
+
+	// ExceptionStacktraceKey is the attribute Key conforming to the
+	// "exception.stacktrace" semantic conventions. It represents a stacktrace
+	// as a string in the natural representation for the language runtime. The
+	// representation is to be determined and documented by each language SIG.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+	// exception\\n at '
+	//  'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+	//  'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+	//  'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+	ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+	// ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+	// semantic conventions. It represents the type of the exception (its
+	// fully-qualified class name, if applicable). The dynamic type of the
+	// exception should be preferred over the static type in languages that
+	// support it.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'java.net.ConnectException', 'OSError'
+	ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+	// ExceptionEscaped returns an attribute KeyValue conforming to the
+	// "exception.escaped" semantic conventions. It SHOULD be set to true if
+	// the exception event is recorded at a point where it is known that the
+	// exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+	return ExceptionEscapedKey.Bool(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+	return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+	return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+	return ExceptionTypeKey.String(val)
+}
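+
+// exampleExceptionAttributes is an illustrative sketch (not part of the
+// generated conventions) recording a single exception with the helpers
+// above; the type and message values are illustrative.
+func exampleExceptionAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		ExceptionType("java.net.ConnectException"),
+		ExceptionMessage("Connection refused"),
+		// True only when the span ends while the exception is still
+		// logically "in flight", per the exception.escaped note above.
+		ExceptionEscaped(true),
+	}
+}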
+
+// Semantic convention attributes in the HTTP namespace.
+const (
+	// HTTPRequestBodySizeKey is the attribute Key conforming to the
+	// "http.request.body.size" semantic conventions. It represents the size of
+	// the request payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as
+	// the
+	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+	// header. For requests using transport encoding, this should be the
+	// compressed size.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3495
+	HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+	// HTTPRequestMethodKey is the attribute Key conforming to the
+	// "http.request.method" semantic conventions. It represents the HTTP
+	// request method.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'GET', 'POST', 'HEAD'
+	// Note: HTTP request method value SHOULD be "known" to the
+	// instrumentation.
+	// By default, this convention defines "known" methods as the ones listed
+	// in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+	// and the PATCH method defined in
+	// [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+	//
+	// If the HTTP request method is not known to instrumentation, it MUST set
+	// the `http.request.method` attribute to `_OTHER`.
+	//
+	// If the HTTP instrumentation could end up converting valid HTTP request
+	// methods to `_OTHER`, then it MUST provide a way to override
+	// the list of known HTTP methods. If this override is done via environment
+	// variable, then the environment variable MUST be named
+	// OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
+	// list of case-sensitive known HTTP methods
+	// (this list MUST be a full override of the default known methods; it is
+	// not a list of known methods in addition to the defaults).
+	//
+	// HTTP method names are case-sensitive and `http.request.method` attribute
+	// value MUST match a known HTTP method name exactly.
+	// Instrumentations for specific web frameworks that consider HTTP methods
+	// to be case insensitive, SHOULD populate a canonical equivalent.
+	// Tracing instrumentations that do so, MUST also set
+	// `http.request.method_original` to the original value.
+	HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+	// HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+	// "http.request.method_original" semantic conventions. It represents the
+	// original HTTP method sent by the client in the request line.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'GeT', 'ACL', 'foo'
+	HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+	// HTTPRequestResendCountKey is the attribute Key conforming to the
+	// "http.request.resend_count" semantic conventions. It represents the
+	// ordinal number of request resending attempt (for any reason, including
+	// redirects).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 3
+	// Note: The resend count SHOULD be updated each time an HTTP request gets
+	// resent by the client, regardless of what was the cause of the resending
+	// (e.g. redirection, authorization failure, 503 Server Unavailable,
+	// network issues, or any other).
+	HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
+
+	// HTTPResponseBodySizeKey is the attribute Key conforming to the
+	// "http.response.body.size" semantic conventions. It represents the size
+	// of the response payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as
+	// the
+	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+	// header. For responses using transport encoding, this should be the
+	// compressed size.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3495
+	HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+
+	// HTTPResponseStatusCodeKey is the attribute Key conforming to the
+	// "http.response.status_code" semantic conventions. It represents the
+	// [HTTP response status
+	// code](https://tools.ietf.org/html/rfc7231#section-6).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 200
+	HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+
+	// HTTPRouteKey is the attribute Key conforming to the "http.route"
+	// semantic conventions. It represents the matched route, that is, the path
+	// template in the format used by the respective server framework.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+	// Note: MUST NOT be populated when this is not supported by the HTTP
+	// server framework, as the route attribute should have low cardinality
+	// and the URI path cannot substitute for it.
+	// SHOULD include the [application
+	// root](/docs/http/http-spans.md#http-server-definitions) if there is one.
+	HTTPRouteKey = attribute.Key("http.route")
+)
+
+var (
+	// CONNECT method
+	HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+	// DELETE method
+	HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+	// GET method
+	HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+	// HEAD method
+	HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+	// OPTIONS method
+	HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+	// PATCH method
+	HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+	// POST method
+	HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+	// PUT method
+	HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+	// TRACE method
+	HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+	// Any HTTP method that the instrumentation has no prior knowledge of
+	HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
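+
+// exampleHTTPRequestMethod is an illustrative sketch, not part of the
+// generated semconv API, of the "_OTHER" fallback described above: a known,
+// case-sensitive method is recorded as-is, while anything else becomes
+// `_OTHER` with the original value preserved in
+// `http.request.method_original`. The known-method table is truncated here
+// for brevity.
+func exampleHTTPRequestMethod(method string) []attribute.KeyValue {
+	known := map[string]attribute.KeyValue{
+		"GET":  HTTPRequestMethodGet,
+		"POST": HTTPRequestMethodPost,
+		"HEAD": HTTPRequestMethodHead,
+		"PUT":  HTTPRequestMethodPut,
+	}
+	if kv, ok := known[method]; ok {
+		return []attribute.KeyValue{kv}
+	}
+	return []attribute.KeyValue{
+		HTTPRequestMethodOther,
+		HTTPRequestMethodOriginal(method),
+	}
+}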
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+	return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+	return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestResendCount returns an attribute KeyValue conforming to the
+// "http.request.resend_count" semantic conventions. It represents the ordinal
+// number of the request resend attempt (for any reason, including redirects).
+func HTTPRequestResendCount(val int) attribute.KeyValue {
+	return HTTPRequestResendCountKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of
+// the response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For responses using transport encoding, this should be the compressed
+// size.
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+	return HTTPResponseBodySizeKey.Int(val)
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the [HTTP
+// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+	return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route, that is, the path
+// template in the format used by the respective server framework.
+func HTTPRoute(val string) attribute.KeyValue {
+	return HTTPRouteKey.String(val)
+}
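+
+// exampleHTTPServerSpanAttrs is an illustrative sketch, not part of the
+// generated semconv API, of the `http.route` rule above: the low-cardinality
+// route template is recorded only when the framework actually matched one,
+// and the raw URI path is never used as a substitute.
+func exampleHTTPServerSpanAttrs(matchedRoute string, status int) []attribute.KeyValue {
+	attrs := []attribute.KeyValue{HTTPResponseStatusCode(status)}
+	if matchedRoute != "" { // only if the framework exposes a matched route
+		attrs = append(attrs, HTTPRoute(matchedRoute))
+	}
+	return attrs
+}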
+
+// Attributes describing telemetry around messaging systems and messaging
+// activities.
+const (
+	// MessagingBatchMessageCountKey is the attribute Key conforming to the
+	// "messaging.batch.message_count" semantic conventions. It represents the
+	// number of messages sent, received, or processed in the scope of the
+	// batching operation.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 0, 1, 2
+	// Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+	// spans that operate with a single message. When a messaging client
+	// library supports both batch and single-message API for the same
+	// operation, instrumentations SHOULD use `messaging.batch.message_count`
+	// for batching APIs and SHOULD NOT use it for single-message APIs.
+	MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+	// MessagingClientIDKey is the attribute Key conforming to the
+	// "messaging.client_id" semantic conventions. It represents a unique
+	// identifier for the client that consumes or produces a message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'client-5', 'myhost@8742@s8083jm'
+	MessagingClientIDKey = attribute.Key("messaging.client_id")
+
+	// MessagingDestinationAnonymousKey is the attribute Key conforming to the
+	// "messaging.destination.anonymous" semantic conventions. It represents a
+	// boolean that is true if the message destination is anonymous (could be
+	// unnamed or have auto-generated name).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+
+	// MessagingDestinationNameKey is the attribute Key conforming to the
+	// "messaging.destination.name" semantic conventions. It represents the
+	// message destination name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MyQueue', 'MyTopic'
+	// Note: Destination name SHOULD uniquely identify a specific queue, topic
+	// or other entity within the broker. If
+	// the broker doesn't have such a notion, the destination name SHOULD
+	// uniquely identify the broker.
+	MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+	// MessagingDestinationTemplateKey is the attribute Key conforming to the
+	// "messaging.destination.template" semantic conventions. It represents the
+	// low-cardinality representation of the messaging destination name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/customers/{customerID}'
+	// Note: Destination names could be constructed from templates. An example
+	// would be a destination name involving a user name or product id.
+	// Although the destination name in this case is of high cardinality, the
+	// underlying template is of low cardinality and can be effectively used
+	// for grouping and aggregation.
+	MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+	// MessagingDestinationTemporaryKey is the attribute Key conforming to the
+	// "messaging.destination.temporary" semantic conventions. It represents a
+	// boolean that is true if the message destination is temporary and might
+	// not exist anymore after messages are processed.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+	// MessagingDestinationPublishAnonymousKey is the attribute Key conforming
+	// to the "messaging.destination_publish.anonymous" semantic conventions.
+	// It represents a boolean that is true if the publish message destination
+	// is anonymous (could be unnamed or have auto-generated name).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous")
+
+	// MessagingDestinationPublishNameKey is the attribute Key conforming to
+	// the "messaging.destination_publish.name" semantic conventions. It
+	// represents the name of the original destination the message was
+	// published to.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MyQueue', 'MyTopic'
+	// Note: The name SHOULD uniquely identify a specific queue, topic, or
+	// other entity within the broker. If
+	// the broker doesn't have such a notion, the original destination name
+	// SHOULD uniquely identify the broker.
+	MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
+
+	// MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
+	// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
+	// It represents the ordering key for a given message. If the attribute is
+	// not present, the message does not have an ordering key.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ordering_key'
+	MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+
+	// MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+	// "messaging.kafka.consumer.group" semantic conventions. It represents the
+	// name of the Kafka Consumer Group that is handling the message. Only
+	// applies to consumers, not producers.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'my-group'
+	MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+	// MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+	// the "messaging.kafka.destination.partition" semantic conventions. It
+	// represents the partition the message is sent to.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 2
+	MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+	// MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+	// "messaging.kafka.message.key" semantic conventions. It represents the
+	// message keys in Kafka are used for grouping alike messages to ensure
+	// they're processed on the same partition. They differ from
+	// `messaging.message.id` in that they're not unique. If the key is `null`,
+	// the attribute MUST NOT be set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myKey'
+	// Note: If the key type is not string, its string representation has to
+	// be supplied for the attribute. If the key has no unambiguous, canonical
+	// string form, don't include its value.
+	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+	// MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+	// "messaging.kafka.message.offset" semantic conventions. It represents the
+	// offset of a record in the corresponding Kafka partition.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 42
+	MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+	// MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+	// "messaging.kafka.message.tombstone" semantic conventions. It represents
+	// a boolean that is true if the message is a tombstone.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+
+	// MessagingMessageBodySizeKey is the attribute Key conforming to the
+	// "messaging.message.body.size" semantic conventions. It represents the
+	// size of the message body in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1439
+	// Note: This can refer to either the compressed or uncompressed body size.
+	// If both sizes are known, the uncompressed
+	// body size should be used.
+	MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+	// MessagingMessageConversationIDKey is the attribute Key conforming to the
+	// "messaging.message.conversation_id" semantic conventions. It represents
+	// the conversation ID identifying the conversation to which the message
+	// belongs, represented as a string. Sometimes called "Correlation ID".
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MyConversationID'
+	MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+	// MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+	// "messaging.message.envelope.size" semantic conventions. It represents
+	// the size of the message body and metadata in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 2738
+	// Note: This can refer to either the compressed or uncompressed size. If
+	// both sizes are known, the uncompressed
+	// size should be used.
+	MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+	// MessagingMessageIDKey is the attribute Key conforming to the
+	// "messaging.message.id" semantic conventions. It represents a value used
+	// by the messaging system as an identifier for the message, represented as
+	// a string.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+	MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+	// MessagingOperationKey is the attribute Key conforming to the
+	// "messaging.operation" semantic conventions. It represents a string
+	// identifying the kind of messaging operation.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: If a custom value is used, it MUST be of low cardinality.
+	MessagingOperationKey = attribute.Key("messaging.operation")
+
+	// MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+	// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+	// conventions. It represents the RabbitMQ message routing key.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myKey'
+	MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+	// MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.client_group" semantic conventions. It represents
+	// the name of the RocketMQ producer/consumer group that is handling the
+	// message. The client type is identified by the SpanKind.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myConsumerGroup'
+	MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+	// MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+	// the "messaging.rocketmq.consumption_model" semantic conventions. It
+	// represents the model of message consumption. This only applies to
+	// consumer spans.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+	// MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+	// conventions. It represents the delay time level for a delay message, which
+	// determines the message delay time.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3
+	MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+	// MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delivery_timestamp"
+	// semantic conventions. It represents the timestamp in milliseconds that
+	// the delay message is expected to be delivered to the consumer.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1665987217045
+	MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+	// MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.group" semantic conventions. It represents
+	// the message group, which is essential for FIFO messages. Messages that
+	// belong to the same message group are always processed one by one within
+	// the same consumer group.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myMessageGroup'
+	MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+	// MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.keys" semantic conventions. It represents
+	// the key(s) of the message, another way to mark a message besides the
+	// message id.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'keyA', 'keyB'
+	MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+	// MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+	// secondary classifier of a message besides the topic.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'tagA'
+	MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+	// MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.type" semantic conventions. It represents
+	// the type of message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+	// MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+	// "messaging.rocketmq.namespace" semantic conventions. It represents the
+	// namespace of RocketMQ resources; resources in different namespaces are
+	// independent of each other.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myNamespace'
+	MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+	// MessagingSystemKey is the attribute Key conforming to the
+	// "messaging.system" semantic conventions. It represents an identifier for
+	// the messaging system being used. See below for a list of well-known
+	// identifiers.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingSystemKey = attribute.Key("messaging.system")
+)
+
+var (
+	// One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created
+	MessagingOperationPublish = MessagingOperationKey.String("publish")
+	// A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios
+	MessagingOperationCreate = MessagingOperationKey.String("create")
+	// One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages
+	MessagingOperationReceive = MessagingOperationKey.String("receive")
+	// One or more messages are passed to a consumer. This operation refers to push-based scenarios, where consumers register callbacks that get called by messaging SDKs
+	MessagingOperationDeliver = MessagingOperationKey.String("deliver")
+)
+
+var (
+	// Clustering consumption model
+	MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+	// Broadcasting consumption model
+	MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+var (
+	// Normal message
+	MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+	// FIFO message
+	MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+	// Delay message
+	MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+	// Transaction message
+	MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+	// Apache ActiveMQ
+	MessagingSystemActivemq = MessagingSystemKey.String("activemq")
+	// Amazon Simple Queue Service (SQS)
+	MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs")
+	// Azure Event Grid
+	MessagingSystemAzureEventgrid = MessagingSystemKey.String("azure_eventgrid")
+	// Azure Event Hubs
+	MessagingSystemAzureEventhubs = MessagingSystemKey.String("azure_eventhubs")
+	// Azure Service Bus
+	MessagingSystemAzureServicebus = MessagingSystemKey.String("azure_servicebus")
+	// Google Cloud Pub/Sub
+	MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub")
+	// Java Message Service
+	MessagingSystemJms = MessagingSystemKey.String("jms")
+	// Apache Kafka
+	MessagingSystemKafka = MessagingSystemKey.String("kafka")
+	// RabbitMQ
+	MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq")
+	// Apache RocketMQ
+	MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq")
+)
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+	return MessagingBatchMessageCountKey.Int(val)
+}
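+
+// exampleMessagingBatchCount is an illustrative sketch, not part of the
+// generated semconv API, of the batching note above: the count is only set
+// when a batch API is used, never on single-message operations.
+func exampleMessagingBatchCount(attrs []attribute.KeyValue, batchSize int, isBatchAPI bool) []attribute.KeyValue {
+	if isBatchAPI {
+		attrs = append(attrs, MessagingBatchMessageCount(batchSize))
+	}
+	return attrs
+}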
+
+// MessagingClientID returns an attribute KeyValue conforming to the
+// "messaging.client_id" semantic conventions. It represents a unique
+// identifier for the client that consumes or produces a message.
+func MessagingClientID(val string) attribute.KeyValue {
+	return MessagingClientIDKey.String(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+	return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name.
+func MessagingDestinationName(val string) attribute.KeyValue {
+	return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low-cardinality representation of the messaging destination name.
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+	return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+	return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationPublishAnonymous returns an attribute KeyValue
+// conforming to the "messaging.destination_publish.anonymous" semantic
+// conventions. It represents a boolean that is true if the publish message
+// destination is anonymous (could be unnamed or have auto-generated name).
+func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue {
+	return MessagingDestinationPublishAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationPublishName returns an attribute KeyValue conforming
+// to the "messaging.destination_publish.name" semantic conventions. It
+// represents the name of the original destination the message was published to.
+func MessagingDestinationPublishName(val string) attribute.KeyValue {
+	return MessagingDestinationPublishNameKey.String(val)
+}
+
+// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic
+// conventions. It represents the ordering key for a given message. If the
+// attribute is not present, the message does not have an ordering key.
+func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
+	return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+	return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+	return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka are used for grouping alike messages to ensure they're
+// processed on the same partition. They differ from `messaging.message.id` in
+// that they're not unique. If the key is `null`, the attribute MUST NOT be
+// set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+	return MessagingKafkaMessageKeyKey.String(val)
+}
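+
+// exampleKafkaMessageAttrs is an illustrative sketch, not part of the
+// generated semconv API, of the `null`-key rule above: a nil Kafka key
+// yields no `messaging.kafka.message.key` attribute at all.
+func exampleKafkaMessageAttrs(key []byte, partition, offset int) []attribute.KeyValue {
+	attrs := []attribute.KeyValue{
+		MessagingKafkaDestinationPartition(partition),
+		MessagingKafkaMessageOffset(offset),
+	}
+	if key != nil { // a null key MUST NOT be recorded
+		attrs = append(attrs, MessagingKafkaMessageKey(string(key)))
+	}
+	return attrs
+}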
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+	return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+	return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// MessagingMessageBodySize returns an attribute KeyValue conforming to the
+// "messaging.message.body.size" semantic conventions. It represents the size
+// of the message body in bytes.
+func MessagingMessageBodySize(val int) attribute.KeyValue {
+	return MessagingMessageBodySizeKey.Int(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming
+// to the "messaging.message.conversation_id" semantic conventions. It
+// represents the conversation ID identifying the conversation to which the
+// message belongs, represented as a string. Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+	return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to
+// the "messaging.message.envelope.size" semantic conventions. It represents
+// the size of the message body and metadata in bytes.
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+	return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+	return MessagingMessageIDKey.String(val)
+}
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the rabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+	return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within the
+// same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark a message besides the
+// message id.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+	return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of a message besides the topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// independent of each other.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+	return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+	// NetworkCarrierIccKey is the attribute Key conforming to the
+	// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+	// alpha-2 2-character country code associated with the mobile carrier
+	// network.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'DE'
+	NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
+
+	// NetworkCarrierMccKey is the attribute Key conforming to the
+	// "network.carrier.mcc" semantic conventions. It represents the mobile
+	// carrier country code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '310'
+	NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
+
+	// NetworkCarrierMncKey is the attribute Key conforming to the
+	// "network.carrier.mnc" semantic conventions. It represents the mobile
+	// carrier network code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '001'
+	NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+	// NetworkCarrierNameKey is the attribute Key conforming to the
+	// "network.carrier.name" semantic conventions. It represents the name of
+	// the mobile carrier.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'sprint'
+	NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
+	// "network.connection.subtype" semantic conventions. It represents the
+	// this describes more details regarding the connection.type. It may be the
+	// type of cell technology connection, but it could be used for describing
+	// details about a wifi connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'LTE'
+	NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+	// NetworkConnectionTypeKey is the attribute Key conforming to the
+	// "network.connection.type" semantic conventions. It represents the
+	// internet connection type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'wifi'
+	NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+	// NetworkIoDirectionKey is the attribute Key conforming to the
+	// "network.io.direction" semantic conventions. It represents the network
+	// IO operation direction.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'transmit'
+	NetworkIoDirectionKey = attribute.Key("network.io.direction")
+
+	// NetworkLocalAddressKey is the attribute Key conforming to the
+	// "network.local.address" semantic conventions. It represents the local
+	// address of the network connection - IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.1.2.80', '/tmp/my.sock'
+	NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+	// NetworkLocalPortKey is the attribute Key conforming to the
+	// "network.local.port" semantic conventions. It represents the local port
+	// number of the network connection.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	NetworkLocalPortKey = attribute.Key("network.local.port")
+
+	// NetworkPeerAddressKey is the attribute Key conforming to the
+	// "network.peer.address" semantic conventions. It represents the peer
+	// address of the network connection - IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.1.2.80', '/tmp/my.sock'
+	NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+	// NetworkPeerPortKey is the attribute Key conforming to the
+	// "network.peer.port" semantic conventions. It represents the peer port
+	// number of the network connection.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+	// NetworkProtocolNameKey is the attribute Key conforming to the
+	// "network.protocol.name" semantic conventions. It represents the [OSI
+	// application layer](https://osi-model.com/application-layer/) or non-OSI
+	// equivalent.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'amqp', 'http', 'mqtt'
+	// Note: The value SHOULD be normalized to lowercase.
+	NetworkProtocolNameKey = attribute.Key("network.protocol.name")
+
+	// NetworkProtocolVersionKey is the attribute Key conforming to the
+	// "network.protocol.version" semantic conventions. It represents the
+	// version of the protocol specified in `network.protocol.name`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '3.1.1'
+	// Note: `network.protocol.version` refers to the version of the protocol
+	// used and might be different from the protocol client's version. If the
+	// HTTP client has a version of `0.27.2`, but sends HTTP version `1.1`,
+	// this attribute should be set to `1.1`.
+	NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
+
+	// NetworkTransportKey is the attribute Key conforming to the
+	// "network.transport" semantic conventions. It represents the [OSI
+	// transport layer](https://osi-model.com/transport-layer/) or
+	// [inter-process communication
+	// method](https://wikipedia.org/wiki/Inter-process_communication).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'tcp', 'udp'
+	// Note: The value SHOULD be normalized to lowercase.
+	//
+	// Consider always setting the transport when setting a port number, since
+	// a port number is ambiguous without knowing the transport. For example
+	// different processes could be listening on TCP port 12345 and UDP port
+	// 12345.
+	NetworkTransportKey = attribute.Key("network.transport")
+
+	// NetworkTypeKey is the attribute Key conforming to the "network.type"
+	// semantic conventions. It represents the [OSI network
+	// layer](https://osi-model.com/network-layer/) or non-OSI equivalent.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'ipv4', 'ipv6'
+	// Note: The value SHOULD be normalized to lowercase.
+	NetworkTypeKey = attribute.Key("network.type")
+)
+
+var (
+	// GPRS
+	NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
+	// EDGE
+	NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
+	// UMTS
+	NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
+	// CDMA
+	NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
+	// EVDO Rel. 0
+	NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
+	// EVDO Rev. A
+	NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
+	// CDMA2000 1XRTT
+	NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
+	// HSDPA
+	NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
+	// HSUPA
+	NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
+	// HSPA
+	NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
+	// IDEN
+	NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
+	// EVDO Rev. B
+	NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+	// LTE
+	NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+	// EHRPD
+	NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+	// HSPAP
+	NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+	// GSM
+	NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+	// TD-SCDMA
+	NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+	// IWLAN
+	NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+	// 5G NR (New Radio)
+	NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+	// 5G NRNSA (New Radio Non-Standalone)
+	NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+	// LTE CA
+	NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+var (
+	// wifi
+	NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+	// wired
+	NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+	// cell
+	NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+	// unavailable
+	NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+	// unknown
+	NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+var (
+	// transmit
+	NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit")
+	// receive
+	NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive")
+)
+
+var (
+	// TCP
+	NetworkTransportTCP = NetworkTransportKey.String("tcp")
+	// UDP
+	NetworkTransportUDP = NetworkTransportKey.String("udp")
+	// Named or anonymous pipe
+	NetworkTransportPipe = NetworkTransportKey.String("pipe")
+	// Unix domain socket
+	NetworkTransportUnix = NetworkTransportKey.String("unix")
+)
+
+var (
+	// IPv4
+	NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
+	// IPv6
+	NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
+)
+
+// NetworkCarrierIcc returns an attribute KeyValue conforming to the
+// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetworkCarrierIcc(val string) attribute.KeyValue {
+	return NetworkCarrierIccKey.String(val)
+}
+
+// NetworkCarrierMcc returns an attribute KeyValue conforming to the
+// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+// country code.
+func NetworkCarrierMcc(val string) attribute.KeyValue {
+	return NetworkCarrierMccKey.String(val)
+}
+
+// NetworkCarrierMnc returns an attribute KeyValue conforming to the
+// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+// network code.
+func NetworkCarrierMnc(val string) attribute.KeyValue {
+	return NetworkCarrierMncKey.String(val)
+}
+
+// NetworkCarrierName returns an attribute KeyValue conforming to the
+// "network.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetworkCarrierName(val string) attribute.KeyValue {
+	return NetworkCarrierNameKey.String(val)
+}
+
+// NetworkLocalAddress returns an attribute KeyValue conforming to the
+// "network.local.address" semantic conventions. It represents the local
+// address of the network connection - IP address or Unix domain socket name.
+func NetworkLocalAddress(val string) attribute.KeyValue {
+	return NetworkLocalAddressKey.String(val)
+}
+
+// NetworkLocalPort returns an attribute KeyValue conforming to the
+// "network.local.port" semantic conventions. It represents the local port
+// number of the network connection.
+func NetworkLocalPort(val int) attribute.KeyValue {
+	return NetworkLocalPortKey.Int(val)
+}
+
+// NetworkPeerAddress returns an attribute KeyValue conforming to the
+// "network.peer.address" semantic conventions. It represents the peer address
+// of the network connection - IP address or Unix domain socket name.
+func NetworkPeerAddress(val string) attribute.KeyValue {
+	return NetworkPeerAddressKey.String(val)
+}
+
+// NetworkPeerPort returns an attribute KeyValue conforming to the
+// "network.peer.port" semantic conventions. It represents the peer port number
+// of the network connection.
+func NetworkPeerPort(val int) attribute.KeyValue {
+	return NetworkPeerPortKey.Int(val)
+}
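+
+// examplePeerAttrs is an illustrative sketch, not part of the generated
+// semconv API, of the `network.transport` note above: whenever a port is
+// recorded, the transport is recorded alongside it, since a bare port number
+// is ambiguous between TCP and UDP.
+func examplePeerAttrs(addr string, port int) []attribute.KeyValue {
+	return []attribute.KeyValue{
+		NetworkPeerAddress(addr),
+		NetworkPeerPort(port),
+		NetworkTransportTCP, // the transport is assumed in this sketch
+	}
+}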
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the [OSI
+// application layer](https://osi-model.com/application-layer/) or non-OSI
+// equivalent.
+func NetworkProtocolName(val string) attribute.KeyValue {
+	return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the version
+// of the protocol specified in `network.protocol.name`.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+	return NetworkProtocolVersionKey.String(val)
+}
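+
+// exampleProtocolAttrs is an illustrative sketch, not part of the generated
+// semconv API, of the version note above: the attribute carries the wire
+// protocol version ("1.1"), not the client library's own version, and the
+// protocol name is normalized to lowercase.
+func exampleProtocolAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		NetworkProtocolName("http"),
+		NetworkProtocolVersion("1.1"),
+	}
+}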
+
+// Attributes for remote procedure calls.
+const (
+	// RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+	// "rpc.connect_rpc.error_code" semantic conventions. It represents the
+	// [error codes](https://connect.build/docs/protocol/#error-codes) of the
+	// Connect request. Error codes are always string values.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+
+	// RPCGRPCStatusCodeKey is the attribute Key conforming to the
+	// "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+	// status
+	// code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+	// the gRPC request.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+
+	// RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+	// `error.code` property of response if it is an error response.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: -32700, 100
+	RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+	// RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+	// `error.message` property of response if it is an error response.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Parse error', 'User already exists'
+	RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+
+	// RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+	// property of the request or response. Since the protocol allows the id
+	// to be int, string, `null` or missing (for notifications), the value is
+	// expected to be cast to string for simplicity. Use an empty string in
+	// case of a `null` value.
+	// Omit entirely if this is a notification.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '10', 'request-7', ''
+	RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+	// RPCJsonrpcVersionKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+	// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+	// doesn't specify this, the value can be omitted.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2.0', '1.0'
+	RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+	// RPCMethodKey is the attribute Key conforming to the "rpc.method"
+	// semantic conventions. It represents the name of the (logical) method
+	// being called; it must be equal to the $method part in the span name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'exampleMethod'
+	// Note: This is the logical name of the method from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// method/function. The `code.function` attribute may be used to store the
+	// latter (e.g., method actually executing the call on the server side, RPC
+	// client stub method on the client side).
+	RPCMethodKey = attribute.Key("rpc.method")
+
+	// RPCServiceKey is the attribute Key conforming to the "rpc.service"
+	// semantic conventions. It represents the full (logical) name of the
+	// service being called, including its package name, if applicable.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myservice.EchoService'
+	// Note: This is the logical name of the service from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// class. The `code.namespace` attribute may be used to store the latter
+	// (despite the attribute name, it may include a class name; e.g., class
+	// with method actually executing the call on the server side, RPC client
+	// stub class on the client side).
+	RPCServiceKey = attribute.Key("rpc.service")
+
+	// RPCSystemKey is the attribute Key conforming to the "rpc.system"
+	// semantic conventions. It represents a string identifying the remoting
+	// system. See below for a list of well-known identifiers.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCSystemKey = attribute.Key("rpc.system")
+)
+
+var (
+	// cancelled
+	RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+	// unknown
+	RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+	// invalid_argument
+	RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+	// deadline_exceeded
+	RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+	// not_found
+	RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+	// already_exists
+	RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+	// permission_denied
+	RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+	// resource_exhausted
+	RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+	// failed_precondition
+	RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+	// aborted
+	RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+	// out_of_range
+	RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+	// unimplemented
+	RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+	// internal
+	RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+	// unavailable
+	RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+	// data_loss
+	RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+	// unauthenticated
+	RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
+
+var (
+	// OK
+	RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+	// CANCELLED
+	RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+	// UNKNOWN
+	RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+	// INVALID_ARGUMENT
+	RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+	// DEADLINE_EXCEEDED
+	RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+	// NOT_FOUND
+	RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+	// ALREADY_EXISTS
+	RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+	// PERMISSION_DENIED
+	RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+	// RESOURCE_EXHAUSTED
+	RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+	// FAILED_PRECONDITION
+	RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+	// ABORTED
+	RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+	// OUT_OF_RANGE
+	RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+	// UNIMPLEMENTED
+	RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+	// INTERNAL
+	RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+	// UNAVAILABLE
+	RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+	// DATA_LOSS
+	RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+	// UNAUTHENTICATED
+	RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
+
+var (
+	// gRPC
+	RPCSystemGRPC = RPCSystemKey.String("grpc")
+	// Java RMI
+	RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+	// .NET WCF
+	RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+	// Apache Dubbo
+	RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+	// Connect RPC
+	RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
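+
+// exampleGRPCAttrs is an illustrative sketch, not part of the generated
+// semconv API, combining the RPC attributes above for a gRPC call; the
+// service and method names are placeholders.
+func exampleGRPCAttrs(statusCode int) []attribute.KeyValue {
+	return []attribute.KeyValue{
+		RPCSystemGRPC,
+		RPCService("myservice.EchoService"),
+		RPCMethod("exampleMethod"),
+		RPCGRPCStatusCodeKey.Int(statusCode),
+	}
+}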
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+	return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+	return RPCJsonrpcErrorMessageKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of the request or response. Since the protocol allows the id to be
+// int, string, `null` or missing (for notifications), the value is expected to
+// be cast to string for simplicity. Use an empty string in case of a `null`
+// value. Omit entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+	return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// doesn't specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+	return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called; it must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+	return RPCMethodKey.String(val)
+}
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+	return RPCServiceKey.String(val)
+}
+
+// These attributes may be used to describe the server in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+	// ServerAddressKey is the attribute Key conforming to the "server.address"
+	// semantic conventions. It represents the server domain name if available
+	// without reverse DNS lookup; otherwise, IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the client side, and when communicating through
+	// an intermediary, `server.address` SHOULD represent the server address
+	// behind any intermediaries, for example proxies, if it's available.
+	ServerAddressKey = attribute.Key("server.address")
+
+	// ServerPortKey is the attribute Key conforming to the "server.port"
+	// semantic conventions. It represents the server port number.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 80, 8080, 443
+	// Note: When observed from the client side, and when communicating through
+	// an intermediary, `server.port` SHOULD represent the server port behind
+	// any intermediaries, for example proxies, if it's available.
+	ServerPortKey = attribute.Key("server.port")
+)
+
+// ServerAddress returns an attribute KeyValue conforming to the
+// "server.address" semantic conventions. It represents the server domain name
+// if available without reverse DNS lookup; otherwise, IP address or Unix
+// domain socket name.
+func ServerAddress(val string) attribute.KeyValue {
+	return ServerAddressKey.String(val)
+}
+
+// ServerPort returns an attribute KeyValue conforming to the "server.port"
+// semantic conventions. It represents the server port number.
+func ServerPort(val int) attribute.KeyValue {
+	return ServerPortKey.Int(val)
+}
+
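+// exampleServerAttributes is a minimal usage sketch: it shows how the
+// helpers above combine into an attribute list describing a server
+// endpoint. The concrete address and port values are illustrative
+// assumptions, not defaults defined by the conventions.
+func exampleServerAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		// Prefer the domain name when known without a reverse DNS lookup;
+		// fall back to an IP address or socket name otherwise.
+		ServerAddress("example.com"),
+		ServerPort(443),
+	}
+}
+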
+// These attributes may be used to describe the sender of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+	// SourceAddressKey is the attribute Key conforming to the "source.address"
+	// semantic conventions. It represents the source address - domain name if
+	// available without reverse DNS lookup; otherwise, IP address or Unix
+	// domain socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the destination side, and when communicating
+	// through an intermediary, `source.address` SHOULD represent the source
+	// address behind any intermediaries, for example proxies, if it's
+	// available.
+	SourceAddressKey = attribute.Key("source.address")
+
+	// SourcePortKey is the attribute Key conforming to the "source.port"
+	// semantic conventions. It represents the source port number.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3389, 2888
+	SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceAddress returns an attribute KeyValue conforming to the
+// "source.address" semantic conventions. It represents the source address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func SourceAddress(val string) attribute.KeyValue {
+	return SourceAddressKey.String(val)
+}
+
+// SourcePort returns an attribute KeyValue conforming to the "source.port"
+// semantic conventions. It represents the source port number.
+func SourcePort(val int) attribute.KeyValue {
+	return SourcePortKey.Int(val)
+}
+
+// Semantic convention attributes in the TLS namespace.
+const (
+	// TLSCipherKey is the attribute Key conforming to the "tls.cipher"
+	// semantic conventions. It represents the string indicating the
+	// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
+	// used during the current connection.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
+	// 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
+	// Note: The values allowed for `tls.cipher` MUST be one of the
+	// `Descriptions` of the [registered TLS Cipher
+	// Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
+	TLSCipherKey = attribute.Key("tls.cipher")
+
+	// TLSClientCertificateKey is the attribute Key conforming to the
+	// "tls.client.certificate" semantic conventions. It represents the
+	// PEM-encoded stand-alone certificate offered by the client. This is
+	// usually mutually-exclusive of `client.certificate_chain` since this
+	// value also exists in that list.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MII...'
+	TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+	// TLSClientCertificateChainKey is the attribute Key conforming to the
+	// "tls.client.certificate_chain" semantic conventions. It represents the
+	// array of PEM-encoded certificates that make up the certificate chain
+	// offered by the client. This is usually mutually-exclusive of
+	// `client.certificate` since that value should be the first certificate in
+	// the chain.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MII...', 'MI...'
+	TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+	// TLSClientHashMd5Key is the attribute Key conforming to the
+	// "tls.client.hash.md5" semantic conventions. It represents the
+	// certificate fingerprint using the MD5 digest of DER-encoded version of
+	// certificate offered by the client. For consistency with other hash
+	// values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+	TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+	// TLSClientHashSha1Key is the attribute Key conforming to the
+	// "tls.client.hash.sha1" semantic conventions. It represents the
+	// certificate fingerprint using the SHA1 digest of DER-encoded version of
+	// certificate offered by the client. For consistency with other hash
+	// values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+	TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
+
+	// TLSClientHashSha256Key is the attribute Key conforming to the
+	// "tls.client.hash.sha256" semantic conventions. It represents the
+	// certificate fingerprint using the SHA256 digest of DER-encoded version
+	// of certificate offered by the client. For consistency with other hash
+	// values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+	TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
+
+	// TLSClientIssuerKey is the attribute Key conforming to the
+	// "tls.client.issuer" semantic conventions. It represents the
+	// distinguished name of
+	// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+	// of the issuer of the x.509 certificate presented by the client.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+	// DC=com'
+	TLSClientIssuerKey = attribute.Key("tls.client.issuer")
+
+	// TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
+	// semantic conventions. It represents a hash that identifies clients based
+	// on how they perform an SSL/TLS handshake.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'd4e5b18d6b55c71272893221c96ba240'
+	TLSClientJa3Key = attribute.Key("tls.client.ja3")
+
+	// TLSClientNotAfterKey is the attribute Key conforming to the
+	// "tls.client.not_after" semantic conventions. It represents the date/Time
+	// indicating when client certificate is no longer considered valid.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2021-01-01T00:00:00.000Z'
+	TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
+
+	// TLSClientNotBeforeKey is the attribute Key conforming to the
+	// "tls.client.not_before" semantic conventions. It represents the
+	// date/time indicating when the client certificate is first considered valid.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1970-01-01T00:00:00.000Z'
+	TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+	// TLSClientServerNameKey is the attribute Key conforming to the
+	// "tls.client.server_name" semantic conventions. It represents the also
+	// called an SNI, this tells the server which hostname to which the client
+	// is attempting to connect to.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry.io'
+	TLSClientServerNameKey = attribute.Key("tls.client.server_name")
+
+	// TLSClientSubjectKey is the attribute Key conforming to the
+	// "tls.client.subject" semantic conventions. It represents the
+	// distinguished name of subject of the x.509 certificate presented by the
+	// client.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
+	TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+	// TLSClientSupportedCiphersKey is the attribute Key conforming to the
+	// "tls.client.supported_ciphers" semantic conventions. It represents the
+	// array of ciphers offered by the client during the client hello.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+	// "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
+	TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+	// TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+	// conventions. It represents the string indicating the curve used for the
+	// given cipher, when applicable
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'secp256r1'
+	TLSCurveKey = attribute.Key("tls.curve")
+
+	// TLSEstablishedKey is the attribute Key conforming to the
+	// "tls.established" semantic conventions. It represents the boolean flag
+	// indicating if the TLS negotiation was successful and transitioned to an
+	// encrypted tunnel.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: True
+	TLSEstablishedKey = attribute.Key("tls.established")
+
+	// TLSNextProtocolKey is the attribute Key conforming to the
+	// "tls.next_protocol" semantic conventions. It represents the string
+	// indicating the protocol being tunneled. Per the values in the [IANA
+	// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+	// this string should be lower case.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'http/1.1'
+	TLSNextProtocolKey = attribute.Key("tls.next_protocol")
+
+	// TLSProtocolNameKey is the attribute Key conforming to the
+	// "tls.protocol.name" semantic conventions. It represents the normalized
+	// lowercase protocol name parsed from original string of the negotiated
+	// [SSL/TLS protocol
+	// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	TLSProtocolNameKey = attribute.Key("tls.protocol.name")
+
+	// TLSProtocolVersionKey is the attribute Key conforming to the
+	// "tls.protocol.version" semantic conventions. It represents the numeric
+	// part of the version parsed from the original string of the negotiated
+	// [SSL/TLS protocol
+	// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1.2', '3'
+	TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
+
+	// TLSResumedKey is the attribute Key conforming to the "tls.resumed"
+	// semantic conventions. It represents the boolean flag indicating if this
+	// TLS connection was resumed from an existing TLS negotiation.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: True
+	TLSResumedKey = attribute.Key("tls.resumed")
+
+	// TLSServerCertificateKey is the attribute Key conforming to the
+	// "tls.server.certificate" semantic conventions. It represents the
+	// PEM-encoded stand-alone certificate offered by the server. This is
+	// usually mutually-exclusive of `server.certificate_chain` since this
+	// value also exists in that list.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MII...'
+	TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+	// TLSServerCertificateChainKey is the attribute Key conforming to the
+	// "tls.server.certificate_chain" semantic conventions. It represents the
+	// array of PEM-encoded certificates that make up the certificate chain
+	// offered by the server. This is usually mutually-exclusive of
+	// `server.certificate` since that value should be the first certificate in
+	// the chain.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MII...', 'MI...'
+	TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+	// TLSServerHashMd5Key is the attribute Key conforming to the
+	// "tls.server.hash.md5" semantic conventions. It represents the
+	// certificate fingerprint using the MD5 digest of DER-encoded version of
+	// certificate offered by the server. For consistency with other hash
+	// values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+	TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+	// TLSServerHashSha1Key is the attribute Key conforming to the
+	// "tls.server.hash.sha1" semantic conventions. It represents the
+	// certificate fingerprint using the SHA1 digest of DER-encoded version of
+	// certificate offered by the server. For consistency with other hash
+	// values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+	TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+	// TLSServerHashSha256Key is the attribute Key conforming to the
+	// "tls.server.hash.sha256" semantic conventions. It represents the
+	// certificate fingerprint using the SHA256 digest of DER-encoded version
+	// of certificate offered by the server. For consistency with other hash
+	// values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+	TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+	// TLSServerIssuerKey is the attribute Key conforming to the
+	// "tls.server.issuer" semantic conventions. It represents the
+	// distinguished name of
+	// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+	// of the issuer of the x.509 certificate presented by the server.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+	// DC=com'
+	TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+	// TLSServerJa3sKey is the attribute Key conforming to the
+	// "tls.server.ja3s" semantic conventions. It represents a hash that
+	// identifies servers based on how they perform an SSL/TLS handshake.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'd4e5b18d6b55c71272893221c96ba240'
+	TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+	// TLSServerNotAfterKey is the attribute Key conforming to the
+	// "tls.server.not_after" semantic conventions. It represents the date/Time
+	// indicating when server certificate is no longer considered valid.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2021-01-01T00:00:00.000Z'
+	TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
+
+	// TLSServerNotBeforeKey is the attribute Key conforming to the
+	// "tls.server.not_before" semantic conventions. It represents the
+	// date/time indicating when the server certificate is first considered valid.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1970-01-01T00:00:00.000Z'
+	TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
+
+	// TLSServerSubjectKey is the attribute Key conforming to the
+	// "tls.server.subject" semantic conventions. It represents the
+	// distinguished name of subject of the x.509 certificate presented by the
+	// server.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
+	TLSServerSubjectKey = attribute.Key("tls.server.subject")
+)
+
+var (
+	// ssl
+	TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
+	// tls
+	TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
+)
+
+// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
+// semantic conventions. It represents the string indicating the
+// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
+// during the current connection.
+func TLSCipher(val string) attribute.KeyValue {
+	return TLSCipherKey.String(val)
+}
+
+// TLSClientCertificate returns an attribute KeyValue conforming to the
+// "tls.client.certificate" semantic conventions. It represents the pEM-encoded
+// stand-alone certificate offered by the client. This is usually
+// mutually-exclusive of `client.certificate_chain` since this value also
+// exists in that list.
+func TLSClientCertificate(val string) attribute.KeyValue {
+	return TLSClientCertificateKey.String(val)
+}
+
+// TLSClientCertificateChain returns an attribute KeyValue conforming to the
+// "tls.client.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the client. This is usually mutually-exclusive of `client.certificate` since
+// that value should be the first certificate in the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+	return TLSClientCertificateChainKey.StringSlice(val)
+}
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+	return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+	return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+	return TLSClientHashSha256Key.String(val)
+}
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the client.
+func TLSClientIssuer(val string) attribute.KeyValue {
+	return TLSClientIssuerKey.String(val)
+}
+
+// TLSClientJa3 returns an attribute KeyValue conforming to the
+// "tls.client.ja3" semantic conventions. It represents a hash that identifies
+// clients based on how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+	return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/Time
+// indicating when client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+	return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/Time
+// indicating when client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+	return TLSClientNotBeforeKey.String(val)
+}
+
+// TLSClientServerName returns an attribute KeyValue conforming to the
+// "tls.client.server_name" semantic conventions. It represents the also called
+// an SNI, this tells the server which hostname to which the client is
+// attempting to connect to.
+func TLSClientServerName(val string) attribute.KeyValue {
+	return TLSClientServerNameKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+	return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+	return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
+// semantic conventions. It represents the string indicating the curve used for
+// the given cipher, when applicable
+func TLSCurve(val string) attribute.KeyValue {
+	return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+	return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string
+// indicating the protocol being tunneled. Per the values in the [IANA
+// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+// this string should be lower case.
+func TLSNextProtocol(val string) attribute.KeyValue {
+	return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part
+// of the version parsed from the original string of the negotiated [SSL/TLS
+// protocol
+// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+func TLSProtocolVersion(val string) attribute.KeyValue {
+	return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+	return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the pEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also
+// exists in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+	return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the server. This is usually mutually-exclusive of `server.certificate` since
+// that value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+	return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+	return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+	return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+	return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the server.
+func TLSServerIssuer(val string) attribute.KeyValue {
+	return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+	return TLSServerJa3sKey.String(val)
+}
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/Time
+// indicating when server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue {
+	return TLSServerNotAfterKey.String(val)
+}
+
+// TLSServerNotBefore returns an attribute KeyValue conforming to the
+// "tls.server.not_before" semantic conventions. It represents the date/Time
+// indicating when server certificate is first considered valid.
+func TLSServerNotBefore(val string) attribute.KeyValue {
+	return TLSServerNotBeforeKey.String(val)
+}
+
+// TLSServerSubject returns an attribute KeyValue conforming to the
+// "tls.server.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the server.
+func TLSServerSubject(val string) attribute.KeyValue {
+	return TLSServerSubjectKey.String(val)
+}
+
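+// exampleTLSAttributes is a minimal usage sketch showing how a few of the
+// TLS helpers above might describe a negotiated TLS 1.2 connection; the
+// values are illustrative assumptions, not derived from a real handshake.
+func exampleTLSAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		TLSProtocolNameTLS,
+		TLSProtocolVersion("1.2"),
+		// The cipher MUST be one of the Descriptions from the IANA registry.
+		TLSCipher("TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"),
+		TLSEstablished(true),
+		TLSClientServerName("opentelemetry.io"),
+	}
+}
+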
+// Attributes describing URL.
+const (
+	// URLFragmentKey is the attribute Key conforming to the "url.fragment"
+	// semantic conventions. It represents the [URI
+	// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'SemConv'
+	URLFragmentKey = attribute.Key("url.fragment")
+
+	// URLFullKey is the attribute Key conforming to the "url.full" semantic
+	// conventions. It represents the absolute URL describing a network
+	// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+	// '//localhost'
+	// Note: For network calls, URL usually has
+	// `scheme://host[:port][path][?query][#fragment]` format, where the
+	// fragment is not transmitted over HTTP, but if it is known, it SHOULD be
+	// included nevertheless.
+	// `url.full` MUST NOT contain credentials passed via the URL in the form
+	// of `https://username:password@www.example.com/`. In such a case, the
+	// username and password SHOULD be redacted and the attribute's value
+	// SHOULD be
+	// `https://REDACTED:REDACTED@www.example.com/`.
+	// `url.full` SHOULD capture the absolute URL when it is available (or can
+	// be reconstructed) and SHOULD NOT be validated or modified except for
+	// sanitizing purposes.
+	URLFullKey = attribute.Key("url.full")
+
+	// URLPathKey is the attribute Key conforming to the "url.path" semantic
+	// conventions. It represents the [URI
+	// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/search'
+	URLPathKey = attribute.Key("url.path")
+
+	// URLQueryKey is the attribute Key conforming to the "url.query" semantic
+	// conventions. It represents the [URI
+	// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'q=OpenTelemetry'
+	// Note: Sensitive content provided in query string SHOULD be scrubbed when
+	// instrumentations can identify it.
+	URLQueryKey = attribute.Key("url.query")
+
+	// URLSchemeKey is the attribute Key conforming to the "url.scheme"
+	// semantic conventions. It represents the [URI
+	// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+	// identifying the used protocol.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'https', 'ftp', 'telnet'
+	URLSchemeKey = attribute.Key("url.scheme")
+)
+
+// URLFragment returns an attribute KeyValue conforming to the
+// "url.fragment" semantic conventions. It represents the [URI
+// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+func URLFragment(val string) attribute.KeyValue {
+	return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full"
+// semantic conventions. It represents the absolute URL describing a network
+// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+func URLFull(val string) attribute.KeyValue {
+	return URLFullKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path"
+// semantic conventions. It represents the [URI
+// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+func URLPath(val string) attribute.KeyValue {
+	return URLPathKey.String(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query"
+// semantic conventions. It represents the [URI
+// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+func URLQuery(val string) attribute.KeyValue {
+	return URLQueryKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI
+// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+// identifying the used protocol.
+func URLScheme(val string) attribute.KeyValue {
+	return URLSchemeKey.String(val)
+}
+
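+// exampleURLAttributes is a minimal usage sketch decomposing one
+// illustrative URL into the components defined above; note that `url.full`
+// carries the credential-redaction requirement described in its comment.
+func exampleURLAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		URLFull("https://www.foo.bar/search?q=OpenTelemetry#SemConv"),
+		URLScheme("https"),
+		URLPath("/search"),
+		URLQuery("q=OpenTelemetry"),
+		URLFragment("SemConv"),
+	}
+}
+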
+// Describes user-agent attributes.
+const (
+	// UserAgentOriginalKey is the attribute Key conforming to the
+	// "user_agent.original" semantic conventions. It represents the value of
+	// the [HTTP
+	// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+	// header sent by the client.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
+	// iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+	// Version/14.1.2 Mobile/15E148 Safari/604.1'
+	UserAgentOriginalKey = attribute.Key("user_agent.original")
+)
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+	return UserAgentOriginalKey.String(val)
+}
+
+// Session is defined as the period of time encompassing all activities
+// performed by the application and the actions executed by the end user.
+// Consequently, a Session is represented as a collection of Logs, Events, and
+// Spans emitted by the Client Application throughout the Session's duration.
+// Each Session is assigned a unique identifier, which is included as an
+// attribute in the Logs, Events, and Spans generated during the Session's
+// lifecycle.
+// When a session reaches end of life, typically due to user inactivity or
+// session timeout, a new session identifier will be assigned. The previous
+// session identifier may be provided by the instrumentation so that telemetry
+// backends can link the two sessions.
+const (
+	// SessionIDKey is the attribute Key conforming to the "session.id"
+	// semantic conventions. It represents a unique id to identify a session.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '00112233-4455-6677-8899-aabbccddeeff'
+	SessionIDKey = attribute.Key("session.id")
+
+	// SessionPreviousIDKey is the attribute Key conforming to the
+	// "session.previous_id" semantic conventions. It represents the previous
+	// `session.id` for this user, when known.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '00112233-4455-6677-8899-aabbccddeeff'
+	SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique id to identify a session.
+func SessionID(val string) attribute.KeyValue {
+	return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+	return SessionPreviousIDKey.String(val)
+}
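+
+// exampleSessionRotation is a minimal usage sketch: when a session expires
+// and a new one begins, emitting the previous id alongside the new one lets
+// telemetry backends link the two sessions. The identifiers here are
+// caller-supplied, illustrative values.
+func exampleSessionRotation(newID, oldID string) []attribute.KeyValue {
+	return []attribute.KeyValue{
+		SessionID(newID),
+		SessionPreviousID(oldID),
+	}
+}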
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..9b802db27228615df99448a18de1d9642490b482
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the v1.24.0
+// version of the OpenTelemetry semantic conventions.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
new file mode 100644
index 0000000000000000000000000000000000000000..cd3c7162955f5667ca1bdc0a9380d2ed70c3490d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
@@ -0,0 +1,211 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This event represents an occurrence of a lifecycle transition on the iOS
+// platform.
+const (
+	// IosStateKey is the attribute Key conforming to the "ios.state" semantic
+	// conventions. It represents the state the application has transitioned
+	// into at the occurrence of the event.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Note: The iOS lifecycle states are defined in the [UIApplicationDelegate
+	// documentation](https://developer.apple.com/documentation/uikit/uiapplicationdelegate#1656902),
+	// from which the `OS terminology` column values are derived.
+	IosStateKey = attribute.Key("ios.state")
+)
+
+var (
+	// The app has become `active`. Associated with UIKit notification `applicationDidBecomeActive`
+	IosStateActive = IosStateKey.String("active")
+	// The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive`
+	IosStateInactive = IosStateKey.String("inactive")
+	// The app is now in the background. This value is associated with UIKit notification `applicationDidEnterBackground`
+	IosStateBackground = IosStateKey.String("background")
+	// The app is now in the foreground. This value is associated with UIKit notification `applicationWillEnterForeground`
+	IosStateForeground = IosStateKey.String("foreground")
+	// The app is about to terminate. Associated with UIKit notification `applicationWillTerminate`
+	IosStateTerminate = IosStateKey.String("terminate")
+)
+
+// This event represents an occurrence of a lifecycle transition on the Android
+// platform.
+const (
+	// AndroidStateKey is the attribute Key conforming to the "android.state"
+	// semantic conventions. It represents the state the application has
+	// transitioned into at the occurrence of the event.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Note: The Android lifecycle states are defined in [Activity lifecycle
+	// callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
+	// from which the `OS identifiers` are derived.
+	AndroidStateKey = attribute.Key("android.state")
+)
+
+var (
+	// Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time
+	AndroidStateCreated = AndroidStateKey.String("created")
+	// Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state
+	AndroidStateBackground = AndroidStateKey.String("background")
+	// Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states
+	AndroidStateForeground = AndroidStateKey.String("foreground")
+)
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+	// FeatureFlagKeyKey is the attribute Key conforming to the
+	// "feature_flag.key" semantic conventions. It represents the unique
+	// identifier of the feature flag.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'logo-color'
+	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+	// FeatureFlagProviderNameKey is the attribute Key conforming to the
+	// "feature_flag.provider_name" semantic conventions. It represents the
+	// name of the service provider that performs the flag evaluation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: experimental
+	// Examples: 'Flag Manager'
+	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+	// FeatureFlagVariantKey is the attribute Key conforming to the
+	// "feature_flag.variant" semantic conventions. It represents the sHOULD be
+	// a semantic identifier for a value. If one is unavailable, a stringified
+	// version of the value can be used.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: experimental
+	// Examples: 'red', 'true', 'on'
+	// Note: A semantic identifier, commonly referred to as a variant, provides
+	// a means
+	// for referring to a value without including the value itself. This can
+	// provide additional context for understanding the meaning behind a value.
+	// For example, the variant `red` may be used for the value `#c05543`.
+	//
+	// A stringified version of the value can be used in situations where a
+	// semantic identifier is unavailable. String representation of the value
+	// should be determined by the implementer.
+	FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+	return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+	return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+	return FeatureFlagVariantKey.String(val)
+}
+
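+// exampleFeatureFlagEvaluation is a minimal usage sketch of the attributes a
+// feature-flag evaluation event might carry, using an illustrative flag key,
+// provider, and variant rather than values from any real flag system.
+func exampleFeatureFlagEvaluation() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		FeatureFlagKey("logo-color"),
+		FeatureFlagProviderName("Flag Manager"),
+		// The variant names the value, e.g. "red" for "#c05543".
+		FeatureFlagVariant("red"),
+	}
+}
+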
+// RPC received/sent message.
+const (
+	// MessageCompressedSizeKey is the attribute Key conforming to the
+	// "message.compressed_size" semantic conventions. It represents the
+	// compressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+	// MessageIDKey is the attribute Key conforming to the "message.id"
+	// semantic conventions. It represents the message ID, which MUST be
+	// calculated as two different counters starting from `1`: one for sent
+	// messages and one for received messages.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: This way we guarantee that the values will be consistent between
+	// different implementations.
+	MessageIDKey = attribute.Key("message.id")
+
+	// MessageTypeKey is the attribute Key conforming to the "message.type"
+	// semantic conventions. It represents whether this is a received or sent
+	// message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessageTypeKey = attribute.Key("message.type")
+
+	// MessageUncompressedSizeKey is the attribute Key conforming to the
+	// "message.uncompressed_size" semantic conventions. It represents the
+	// uncompressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+	// sent
+	MessageTypeSent = MessageTypeKey.String("SENT")
+	// received
+	MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+	return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It represents the message ID, which MUST be calculated
+// as two different counters starting from `1`: one for sent messages and one
+// for received messages.
+func MessageID(val int) attribute.KeyValue {
+	return MessageIDKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+	return MessageUncompressedSizeKey.Int(val)
+}
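+
+// exampleSentMessage is a minimal usage sketch of the message attributes.
+// sentCount is assumed to be a per-span counter of sent messages starting
+// from 1, kept separately from the received-message counter as `message.id`
+// requires; the sizes are illustrative.
+func exampleSentMessage(sentCount int) []attribute.KeyValue {
+	return []attribute.KeyValue{
+		MessageTypeSent,
+		MessageID(sentCount),
+		MessageCompressedSize(512),
+		MessageUncompressedSize(1024),
+	}
+}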
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go
new file mode 100644
index 0000000000000000000000000000000000000000..ef9bbd37a8c4ee451bf4bd23c98d289af4642ed1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+const (
+	// ExceptionEventName is the name of the Span event representing an exception.
+	ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go
new file mode 100644
index 0000000000000000000000000000000000000000..69eda1959f20d15734230ca8ac96a35275c032e2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go
@@ -0,0 +1,2556 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// A cloud environment (e.g. GCP, Azure, AWS).
+const (
+	// CloudAccountIDKey is the attribute Key conforming to the
+	// "cloud.account.id" semantic conventions. It represents the cloud account
+	// ID the resource is assigned to.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '111111111111', 'opentelemetry'
+	CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+	// CloudAvailabilityZoneKey is the attribute Key conforming to the
+	// "cloud.availability_zone" semantic conventions. It represents the cloud
+	// regions often have multiple, isolated locations known as zones to
+	// increase availability. Availability zone represents the zone where the
+	// resource is running.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'us-east-1c'
+	// Note: Availability zones are called "zones" on Alibaba Cloud and Google
+	// Cloud.
+	CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+	// CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+	// semantic conventions. It represents the cloud platform in use.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The prefix of the service SHOULD match the one specified in
+	// `cloud.provider`.
+	CloudPlatformKey = attribute.Key("cloud.platform")
+
+	// CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+	// semantic conventions. It represents the name of the cloud provider.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	CloudProviderKey = attribute.Key("cloud.provider")
+
+	// CloudRegionKey is the attribute Key conforming to the "cloud.region"
+	// semantic conventions. It represents the geographical region the resource
+	// is running in.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'us-central1', 'us-east-1'
+	// Note: Refer to your provider's docs to see the available regions, for
+	// example [Alibaba Cloud
+	// regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+	// regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+	// [Azure
+	// regions](https://azure.microsoft.com/global-infrastructure/geographies/),
+	// [Google Cloud regions](https://cloud.google.com/about/locations), or
+	// [Tencent Cloud
+	// regions](https://www.tencentcloud.com/document/product/213/6091).
+	CloudRegionKey = attribute.Key("cloud.region")
+
+	// CloudResourceIDKey is the attribute Key conforming to the
+	// "cloud.resource_id" semantic conventions. It represents the cloud
+	// provider-specific native identifier of the monitored cloud resource
+	// (e.g. an
+	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+	// on AWS, a [fully qualified resource
+	// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id)
+	// on Azure, a [full resource
+	// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+	// on GCP)
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+	// '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+	// '/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>'
+	// Note: On some cloud providers, it may not be possible to determine the
+	// full ID at startup,
+	// so it may be necessary to set `cloud.resource_id` as a span attribute
+	// instead.
+	//
+	// The exact value to use for `cloud.resource_id` depends on the cloud
+	// provider.
+	// The following well-known definitions MUST be used if you set this
+	// attribute and they apply:
+	//
+	// * **AWS Lambda:** The function
+	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+	//   Take care not to use the "invoked ARN" directly but replace any
+	//   [alias
+	// suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+	//   with the resolved function version, as the same runtime instance may
+	// be invokable with
+	//   multiple different aliases.
+	// * **GCP:** The [URI of the
+	// resource](https://cloud.google.com/iam/docs/full-resource-names)
+	// * **Azure:** The [Fully Qualified Resource
+	// ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id)
+	// of the invoked function,
+	//   *not* the function app, having the form
+	// `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+	//   This means that a span attribute MUST be used, as an Azure function
+	// app can host multiple functions that would usually share
+	//   a TracerProvider.
+	CloudResourceIDKey = attribute.Key("cloud.resource_id")
+)
+
+var (
+	// Alibaba Cloud Elastic Compute Service
+	CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+	// Alibaba Cloud Function Compute
+	CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+	// Red Hat OpenShift on Alibaba Cloud
+	CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+	// AWS Elastic Compute Cloud
+	CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+	// AWS Elastic Container Service
+	CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+	// AWS Elastic Kubernetes Service
+	CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+	// AWS Lambda
+	CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+	// AWS Elastic Beanstalk
+	CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+	// AWS App Runner
+	CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+	// Red Hat OpenShift on AWS (ROSA)
+	CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+	// Azure Virtual Machines
+	CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+	// Azure Container Instances
+	CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+	// Azure Kubernetes Service
+	CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+	// Azure Functions
+	CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+	// Azure App Service
+	CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+	// Azure Red Hat OpenShift
+	CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+	// Google Bare Metal Solution (BMS)
+	CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
+	// Google Cloud Compute Engine (GCE)
+	CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+	// Google Cloud Run
+	CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+	// Google Cloud Kubernetes Engine (GKE)
+	CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+	// Google Cloud Functions (GCF)
+	CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+	// Google Cloud App Engine (GAE)
+	CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+	// Red Hat OpenShift on Google Cloud
+	CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
+	// Red Hat OpenShift on IBM Cloud
+	CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+	// Tencent Cloud Cloud Virtual Machine (CVM)
+	CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+	// Tencent Cloud Elastic Kubernetes Service (EKS)
+	CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+	// Tencent Cloud Serverless Cloud Function (SCF)
+	CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+var (
+	// Alibaba Cloud
+	CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	CloudProviderAWS = CloudProviderKey.String("aws")
+	// Microsoft Azure
+	CloudProviderAzure = CloudProviderKey.String("azure")
+	// Google Cloud Platform
+	CloudProviderGCP = CloudProviderKey.String("gcp")
+	// Heroku Platform as a Service
+	CloudProviderHeroku = CloudProviderKey.String("heroku")
+	// IBM Cloud
+	CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+	// Tencent Cloud
+	CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+	return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. Cloud regions often have
+// multiple, isolated locations known as zones to increase availability. The
+// availability zone represents the zone where the resource is running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+	return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// in which the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+	return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
+// Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP).
+func CloudResourceID(val string) attribute.KeyValue {
+	return CloudResourceIDKey.String(val)
+}
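+
+// exampleCloudLocationAttrs is an illustrative sketch, not part of the
+// generated conventions; the values are assumptions for demonstration only.
+// It shows how the cloud.* constructors above combine to describe where a
+// resource runs.
+func exampleCloudLocationAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		CloudAccountID("111111111111"),
+		CloudRegion("us-east-1"),
+		CloudAvailabilityZone("us-east-1c"),
+	}
+}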
+
+// A container instance.
+const (
+	// ContainerCommandKey is the attribute Key conforming to the
+	// "container.command" semantic conventions. It represents the command used
+	// to run the container (i.e. the command name).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol'
+	// Note: If using embedded credentials or sensitive data, it is recommended
+	// to remove them to prevent potential leakage.
+	ContainerCommandKey = attribute.Key("container.command")
+
+	// ContainerCommandArgsKey is the attribute Key conforming to the
+	// "container.command_args" semantic conventions. It represents all the
+	// command arguments (including the command/executable itself) run by the
+	// container.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol, --config, config.yaml'
+	ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+	// ContainerCommandLineKey is the attribute Key conforming to the
+	// "container.command_line" semantic conventions. It represents the full
+	// command run by the container as a single string.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol --config config.yaml'
+	ContainerCommandLineKey = attribute.Key("container.command_line")
+
+	// ContainerIDKey is the attribute Key conforming to the "container.id"
+	// semantic conventions. It represents the container ID. Usually a UUID, as
+	// for example used to [identify Docker
+	// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+	// The UUID might be abbreviated.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'a3bf90e006b2'
+	ContainerIDKey = attribute.Key("container.id")
+
+	// ContainerImageIDKey is the attribute Key conforming to the
+	// "container.image.id" semantic conventions. It represents the
+	// runtime-specific image identifier, usually a hash algorithm followed by
+	// a hash value (for example, a `sha256` digest).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+	// Note: Docker defines the image ID as a sha256 digest; `container.image.id`
+	// corresponds to the `Image` field from the Docker container inspect
+	// [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+	// endpoint.
+	// K8S defines a link to the container registry repository with digest,
+	// e.g. `"imageID":
+	// "registry.azurecr.io/namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+	// The ID is assigned by the container runtime and can vary in different
+	// environments. Consider using `oci.manifest.digest` if it is important to
+	// identify the same image in different environments/runtimes.
+	ContainerImageIDKey = attribute.Key("container.image.id")
+
+	// ContainerImageNameKey is the attribute Key conforming to the
+	// "container.image.name" semantic conventions. It represents the name of
+	// the image the container was built on.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'gcr.io/opentelemetry/operator'
+	ContainerImageNameKey = attribute.Key("container.image.name")
+
+	// ContainerImageRepoDigestsKey is the attribute Key conforming to the
+	// "container.image.repo_digests" semantic conventions. It represents the
+	// repo digests of the container image as provided by the container
+	// runtime.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
+	// 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
+	// Note:
+	// [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
+	// and
+	// [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
+	// report those under the `RepoDigests` field.
+	ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+	// ContainerImageTagsKey is the attribute Key conforming to the
+	// "container.image.tags" semantic conventions. It represents the container
+	// image tags. An example can be found in [Docker Image
+	// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+	// Should be only the `<tag>` section of the full name, for example the
+	// `<tag>` in `registry.example.com/my-org/my-image:<tag>`.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'v1.27.1', '3.5.7-0'
+	ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+	// ContainerNameKey is the attribute Key conforming to the "container.name"
+	// semantic conventions. It represents the container name used by the
+	// container runtime.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-autoconf'
+	ContainerNameKey = attribute.Key("container.name")
+
+	// ContainerRuntimeKey is the attribute Key conforming to the
+	// "container.runtime" semantic conventions. It represents the container
+	// runtime managing this container.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'docker', 'containerd', 'rkt'
+	ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+	return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) run by the
+// container. [2]
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+	return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string representing the full
+// command. [2]
+func ContainerCommandLine(val string) attribute.KeyValue {
+	return ContainerCommandLineKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+	return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the
+// runtime-specific image identifier, usually a hash algorithm followed by a
+// hash value (for example, a `sha256` digest).
+func ContainerImageID(val string) attribute.KeyValue {
+	return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+	return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+	return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container
+// image tags. An example can be found in [Docker Image
+// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+// Should be only the `<tag>` section of the full name, for example the
+// `<tag>` in `registry.example.com/my-org/my-image:<tag>`.
+func ContainerImageTags(val ...string) attribute.KeyValue {
+	return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+	return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+	return ContainerRuntimeKey.String(val)
+}
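+
+// exampleContainerAttrs is an illustrative sketch, not part of the generated
+// conventions; the values are the documented examples. It shows how container
+// identity is usually described: name, ID, image, and runtime together.
+func exampleContainerAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		ContainerName("opentelemetry-autoconf"),
+		ContainerID("a3bf90e006b2"),
+		ContainerImageName("gcr.io/opentelemetry/operator"),
+		ContainerImageTags("v1.27.1"),
+		ContainerRuntime("containerd"),
+	}
+}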
+
+// Describes device attributes.
+const (
+	// DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+	// conventions. It represents a unique identifier for the device.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+	// Note: The device identifier MUST only be defined using the values
+	// outlined below. This value is not an advertising identifier and MUST NOT
+	// be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+	// to the [vendor
+	// identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+	// On Android (Java or Kotlin), this value MUST be equal to the Firebase
+	// Installation ID or a globally unique UUID which is persisted across
+	// sessions in your application. More information can be found
+	// [here](https://developer.android.com/training/articles/user-data-ids) on
+	// best practices and exact implementation details. Caution should be taken
+	// when storing personal data or anything which can identify a user. GDPR
+	// and data protection laws may apply, ensure you do your own due
+	// diligence.
+	DeviceIDKey = attribute.Key("device.id")
+
+	// DeviceManufacturerKey is the attribute Key conforming to the
+	// "device.manufacturer" semantic conventions. It represents the name of
+	// the device manufacturer.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Apple', 'Samsung'
+	// Note: The Android OS provides this field via
+	// [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+	// iOS apps SHOULD hardcode the value `Apple`.
+	DeviceManufacturerKey = attribute.Key("device.manufacturer")
+
+	// DeviceModelIdentifierKey is the attribute Key conforming to the
+	// "device.model.identifier" semantic conventions. It represents the model
+	// identifier for the device.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'iPhone3,4', 'SM-G920F'
+	// Note: It's recommended this value represents a machine-readable version
+	// of the model identifier rather than the market or consumer-friendly name
+	// of the device.
+	DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+	// DeviceModelNameKey is the attribute Key conforming to the
+	// "device.model.name" semantic conventions. It represents the marketing
+	// name for the device model.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+	// Note: It's recommended this value represents a human-readable version of
+	// the device model rather than a machine-readable alternative.
+	DeviceModelNameKey = attribute.Key("device.model.name")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier for the device.
+func DeviceID(val string) attribute.KeyValue {
+	return DeviceIDKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer.
+func DeviceManufacturer(val string) attribute.KeyValue {
+	return DeviceManufacturerKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device.
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+	return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model.
+func DeviceModelName(val string) attribute.KeyValue {
+	return DeviceModelNameKey.String(val)
+}
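+
+// exampleDeviceAttrs is an illustrative sketch, not part of the generated
+// conventions; the values are the documented examples. It pairs the
+// machine-readable model identifier with the human-readable model name.
+func exampleDeviceAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		DeviceManufacturer("Apple"),
+		DeviceModelIdentifier("iPhone3,4"),
+		DeviceModelName("iPhone 6s Plus"),
+	}
+}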
+
+// A host is defined as a computing instance. For example, physical servers,
+// virtual machines, switches, or disk arrays.
+const (
+	// HostArchKey is the attribute Key conforming to the "host.arch" semantic
+	// conventions. It represents the CPU architecture the host system is
+	// running on.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	HostArchKey = attribute.Key("host.arch")
+
+	// HostCPUCacheL2SizeKey is the attribute Key conforming to the
+	// "host.cpu.cache.l2.size" semantic conventions. It represents the amount
+	// of level 2 memory cache available to the processor (in Bytes).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 12288000
+	HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
+
+	// HostCPUFamilyKey is the attribute Key conforming to the
+	// "host.cpu.family" semantic conventions. It represents the family or
+	// generation of the CPU.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '6', 'PA-RISC 1.1e'
+	HostCPUFamilyKey = attribute.Key("host.cpu.family")
+
+	// HostCPUModelIDKey is the attribute Key conforming to the
+	// "host.cpu.model.id" semantic conventions. It represents the model
+	// identifier. It provides more granular information about the CPU,
+	// distinguishing it from other CPUs within the same family.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '6', '9000/778/B180L'
+	HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+
+	// HostCPUModelNameKey is the attribute Key conforming to the
+	// "host.cpu.model.name" semantic conventions. It represents the model
+	// designation of the processor.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
+	HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+
+	// HostCPUSteppingKey is the attribute Key conforming to the
+	// "host.cpu.stepping" semantic conventions. It represents the stepping or
+	// core revisions.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1
+	HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+	// HostCPUVendorIDKey is the attribute Key conforming to the
+	// "host.cpu.vendor.id" semantic conventions. It represents the processor
+	// manufacturer identifier. A maximum 12-character string.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'GenuineIntel'
+	// Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
+	// ID string in EBX, EDX and ECX registers. Writing these to memory in this
+	// order results in a 12-character string.
+	HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+
+	// HostIDKey is the attribute Key conforming to the "host.id" semantic
+	// conventions. It represents the unique host ID. For Cloud, this must be
+	// the instance_id assigned by the cloud provider. For non-containerized
+	// systems, this should be the `machine-id`. See the semantic conventions
+	// for the sources used to determine the `machine-id` on each operating
+	// system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+	HostIDKey = attribute.Key("host.id")
+
+	// HostImageIDKey is the attribute Key conforming to the "host.image.id"
+	// semantic conventions. It represents the VM image ID or host OS image ID.
+	// For Cloud, this value is from the provider.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ami-07b06b442921831e5'
+	HostImageIDKey = attribute.Key("host.image.id")
+
+	// HostImageNameKey is the attribute Key conforming to the
+	// "host.image.name" semantic conventions. It represents the name of the VM
+	// image or OS install the host was instantiated from.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+	HostImageNameKey = attribute.Key("host.image.name")
+
+	// HostImageVersionKey is the attribute Key conforming to the
+	// "host.image.version" semantic conventions. It represents the version
+	// string of the VM image or host OS as defined in [Version
+	// Attributes](/docs/resource/README.md#version-attributes).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '0.1'
+	HostImageVersionKey = attribute.Key("host.image.version")
+
+	// HostIPKey is the attribute Key conforming to the "host.ip" semantic
+	// conventions. It represents the available IP addresses of the host,
+	// excluding loopback interfaces.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e'
+	// Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
+	// addresses MUST be specified in the [RFC
+	// 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format.
+	HostIPKey = attribute.Key("host.ip")
+
+	// HostMacKey is the attribute Key conforming to the "host.mac" semantic
+	// conventions. It represents the available MAC addresses of the host,
+	// excluding loopback interfaces.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F'
+	// Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal
+	// form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf):
+	// as hyphen-separated octets in uppercase hexadecimal form from most to
+	// least significant.
+	HostMacKey = attribute.Key("host.mac")
+
+	// HostNameKey is the attribute Key conforming to the "host.name" semantic
+	// conventions. It represents the name of the host. On Unix systems, it may
+	// contain what the hostname command returns, or the fully qualified
+	// hostname, or another name specified by the user.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-test'
+	HostNameKey = attribute.Key("host.name")
+
+	// HostTypeKey is the attribute Key conforming to the "host.type" semantic
+	// conventions. It represents the type of host. For Cloud, this must be the
+	// machine type.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'n1-standard-1'
+	HostTypeKey = attribute.Key("host.type")
+)
+
+var (
+	// AMD64
+	HostArchAMD64 = HostArchKey.String("amd64")
+	// ARM32
+	HostArchARM32 = HostArchKey.String("arm32")
+	// ARM64
+	HostArchARM64 = HostArchKey.String("arm64")
+	// Itanium
+	HostArchIA64 = HostArchKey.String("ia64")
+	// 32-bit PowerPC
+	HostArchPPC32 = HostArchKey.String("ppc32")
+	// 64-bit PowerPC
+	HostArchPPC64 = HostArchKey.String("ppc64")
+	// IBM z/Architecture
+	HostArchS390x = HostArchKey.String("s390x")
+	// 32-bit x86
+	HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
+// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
+// level 2 memory cache available to the processor (in Bytes).
+func HostCPUCacheL2Size(val int) attribute.KeyValue {
+	return HostCPUCacheL2SizeKey.Int(val)
+}
+
+// HostCPUFamily returns an attribute KeyValue conforming to the
+// "host.cpu.family" semantic conventions. It represents the family or
+// generation of the CPU.
+func HostCPUFamily(val string) attribute.KeyValue {
+	return HostCPUFamilyKey.String(val)
+}
+
+// HostCPUModelID returns an attribute KeyValue conforming to the
+// "host.cpu.model.id" semantic conventions. It represents the model
+// identifier. It provides more granular information about the CPU,
+// distinguishing it from other CPUs within the same family.
+func HostCPUModelID(val string) attribute.KeyValue {
+	return HostCPUModelIDKey.String(val)
+}
+
+// HostCPUModelName returns an attribute KeyValue conforming to the
+// "host.cpu.model.name" semantic conventions. It represents the model
+// designation of the processor.
+func HostCPUModelName(val string) attribute.KeyValue {
+	return HostCPUModelNameKey.String(val)
+}
+
+// HostCPUStepping returns an attribute KeyValue conforming to the
+// "host.cpu.stepping" semantic conventions. It represents the stepping or core
+// revisions.
+func HostCPUStepping(val int) attribute.KeyValue {
+	return HostCPUSteppingKey.Int(val)
+}
+
+// HostCPUVendorID returns an attribute KeyValue conforming to the
+// "host.cpu.vendor.id" semantic conventions. It represents the processor
+// manufacturer identifier. A maximum 12-character string.
+func HostCPUVendorID(val string) attribute.KeyValue {
+	return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the semantic conventions for the
+// sources used to determine the `machine-id` on each operating system.
+func HostID(val string) attribute.KeyValue {
+	return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the vM image ID or host
+// OS image ID. For Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+	return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+	return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image or host OS as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+	return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+	return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac"
+// semantic conventions. It represents the available MAC addresses of the host,
+// excluding loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+	return HostMacKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+	return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+	return HostTypeKey.String(val)
+}
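+
+// exampleHostAttrs is an illustrative sketch, not part of the generated
+// conventions; the values are the documented examples. A typical cloud host
+// resource combines identity, machine type, architecture, and image
+// attributes.
+func exampleHostAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		HostID("fdbf79e8af94cb7f9e8df36789187052"),
+		HostName("opentelemetry-test"),
+		HostType("n1-standard-1"),
+		HostArchAMD64,
+		HostImageID("ami-07b06b442921831e5"),
+	}
+}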
+
+// Kubernetes resource attributes.
+const (
+	// K8SClusterNameKey is the attribute Key conforming to the
+	// "k8s.cluster.name" semantic conventions. It represents the name of the
+	// cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-cluster'
+	K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+	// K8SClusterUIDKey is the attribute Key conforming to the
+	// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
+	// the cluster, set to the UID of the `kube-system` namespace.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
+	// Note: K8S doesn't have support for obtaining a cluster ID. If this is
+	// ever added, we will recommend collecting the `k8s.cluster.uid` through the
+	// official APIs. In the meantime, we are able to use the `uid` of the
+	// `kube-system` namespace as a proxy for cluster ID. Read on for the
+	// rationale.
+	//
+	// Every object created in a K8S cluster is assigned a distinct UID. The
+	// `kube-system` namespace is used by Kubernetes itself and will exist
+	// for the lifetime of the cluster. Using the `uid` of the `kube-system`
+	// namespace is a reasonable proxy for the K8S ClusterID as it will only
+	// change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+	// UUIDs as standardized by
+	// [ISO/IEC 9834-8 and ITU-T
+	// X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html),
+	// which states:
+	//
+	// > If generated according to one of the mechanisms defined in Rec.
+	//   ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+	//   different from all other UUIDs generated before 3603 A.D., or is
+	//   extremely likely to be different (depending on the mechanism chosen).
+	//
+	// Therefore, UIDs between clusters should be extremely unlikely to
+	// conflict.
+	K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+
+	// K8SContainerNameKey is the attribute Key conforming to the
+	// "k8s.container.name" semantic conventions. It represents the name of the
+	// Container from Pod specification, must be unique within a Pod. Container
+	// runtime usually uses different globally unique name (`container.name`).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'redis'
+	K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+	// K8SContainerRestartCountKey is the attribute Key conforming to the
+	// "k8s.container.restart_count" semantic conventions. It represents the
+	// number of times the container was restarted. This attribute can be used
+	// to identify a particular container (running or stopped) within a
+	// container spec.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 0, 2
+	K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+
+	// K8SCronJobNameKey is the attribute Key conforming to the
+	// "k8s.cronjob.name" semantic conventions. It represents the name of the
+	// CronJob.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+
+	// K8SCronJobUIDKey is the attribute Key conforming to the
+	// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+	// CronJob.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+	// K8SDaemonSetNameKey is the attribute Key conforming to the
+	// "k8s.daemonset.name" semantic conventions. It represents the name of the
+	// DaemonSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+
+	// K8SDaemonSetUIDKey is the attribute Key conforming to the
+	// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+	// DaemonSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+	// K8SDeploymentNameKey is the attribute Key conforming to the
+	// "k8s.deployment.name" semantic conventions. It represents the name of
+	// the Deployment.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+
+	// K8SDeploymentUIDKey is the attribute Key conforming to the
+	// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+	// Deployment.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+	// K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+	// semantic conventions. It represents the name of the Job.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	K8SJobNameKey = attribute.Key("k8s.job.name")
+
+	// K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+	// semantic conventions. It represents the UID of the Job.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+	// K8SNamespaceNameKey is the attribute Key conforming to the
+	// "k8s.namespace.name" semantic conventions. It represents the name of the
+	// namespace that the pod is running in.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'default'
+	K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+
+	// K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+	// semantic conventions. It represents the name of the Node.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'node-1'
+	K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+	// K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+	// semantic conventions. It represents the UID of the Node.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+	K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+
+	// K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+	// semantic conventions. It represents the name of the Pod.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-pod-autoconf'
+	K8SPodNameKey = attribute.Key("k8s.pod.name")
+
+	// K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+	// semantic conventions. It represents the UID of the Pod.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+	// K8SReplicaSetNameKey is the attribute Key conforming to the
+	// "k8s.replicaset.name" semantic conventions. It represents the name of
+	// the ReplicaSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+
+	// K8SReplicaSetUIDKey is the attribute Key conforming to the
+	// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+	// ReplicaSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+	// K8SStatefulSetNameKey is the attribute Key conforming to the
+	// "k8s.statefulset.name" semantic conventions. It represents the name of
+	// the StatefulSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+
+	// K8SStatefulSetUIDKey is the attribute Key conforming to the
+	// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+	// StatefulSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+	return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+	return K8SClusterUIDKey.String(val)
+}
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+	return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+	return K8SContainerRestartCountKey.Int(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+	return K8SCronJobNameKey.String(val)
+}
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+	return K8SCronJobUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+	return K8SDaemonSetNameKey.String(val)
+}
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+	return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+	return K8SDeploymentNameKey.String(val)
+}
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+	return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+	return K8SJobNameKey.String(val)
+}
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+	return K8SJobUIDKey.String(val)
+}
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+	return K8SNamespaceNameKey.String(val)
+}
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+	return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+	return K8SNodeUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+	return K8SPodNameKey.String(val)
+}
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+	return K8SPodUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+	return K8SReplicaSetNameKey.String(val)
+}
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+	return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+	return K8SStatefulSetNameKey.String(val)
+}
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+	return K8SStatefulSetUIDKey.String(val)
+}
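+
+// exampleK8SAttrs is an illustrative sketch, not part of the generated
+// conventions; the values are the documented examples. A pod-level resource
+// usually carries the cluster, namespace, pod, and container names together.
+func exampleK8SAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		K8SClusterName("opentelemetry-cluster"),
+		K8SNamespaceName("default"),
+		K8SPodName("opentelemetry-pod-autoconf"),
+		K8SContainerName("redis"),
+		K8SContainerRestartCount(0),
+	}
+}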
+
+// An OCI image manifest.
+const (
+	// OciManifestDigestKey is the attribute Key conforming to the
+	// "oci.manifest.digest" semantic conventions. It represents the digest of
+	// the OCI image manifest. For container images specifically, this is the
+	// digest by which the container image is known.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
+	// Note: Follows [OCI Image Manifest
+	// Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
+	// and specifically the [Digest
+	// property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
+	// An example can be found in [Example Image
+	// Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
+	OciManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OciManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, this is the digest
+// by which the container image is known.
+func OciManifestDigest(val string) attribute.KeyValue {
+	return OciManifestDigestKey.String(val)
+}
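+
+// exampleOciAttrs is an illustrative sketch, not part of the generated
+// conventions; the digest is the documented example. Unlike
+// `container.image.id`, `oci.manifest.digest` can identify the same image
+// across different environments and runtimes.
+func exampleOciAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		OciManifestDigest("sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4"),
+	}
+}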
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+	// OSBuildIDKey is the attribute Key conforming to the "os.build_id"
+	// semantic conventions. It represents the unique identifier for a
+	// particular build or compilation of the operating system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
+	OSBuildIDKey = attribute.Key("os.build_id")
+
+	// OSDescriptionKey is the attribute Key conforming to the "os.description"
+	// semantic conventions. It represents the human-readable (not intended to
+	// be parsed) OS version information, e.g. as reported by the `ver` or
+	// `lsb_release -a` commands.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+	// LTS'
+	OSDescriptionKey = attribute.Key("os.description")
+
+	// OSNameKey is the attribute Key conforming to the "os.name" semantic
+	// conventions. It represents the human-readable operating system name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'iOS', 'Android', 'Ubuntu'
+	OSNameKey = attribute.Key("os.name")
+
+	// OSTypeKey is the attribute Key conforming to the "os.type" semantic
+	// conventions. It represents the operating system type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	OSTypeKey = attribute.Key("os.type")
+
+	// OSVersionKey is the attribute Key conforming to the "os.version"
+	// semantic conventions. It represents the version string of the operating
+	// system as defined in [Version
+	// Attributes](/docs/resource/README.md#version-attributes).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '14.2.1', '18.04.1'
+	OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+	// Microsoft Windows
+	OSTypeWindows = OSTypeKey.String("windows")
+	// Linux
+	OSTypeLinux = OSTypeKey.String("linux")
+	// Apple Darwin
+	OSTypeDarwin = OSTypeKey.String("darwin")
+	// FreeBSD
+	OSTypeFreeBSD = OSTypeKey.String("freebsd")
+	// NetBSD
+	OSTypeNetBSD = OSTypeKey.String("netbsd")
+	// OpenBSD
+	OSTypeOpenBSD = OSTypeKey.String("openbsd")
+	// DragonFly BSD
+	OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+	// HP-UX (Hewlett Packard Unix)
+	OSTypeHPUX = OSTypeKey.String("hpux")
+	// AIX (Advanced Interactive eXecutive)
+	OSTypeAIX = OSTypeKey.String("aix")
+	// SunOS, Oracle Solaris
+	OSTypeSolaris = OSTypeKey.String("solaris")
+	// IBM z/OS
+	OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the operating system.
+func OSBuildID(val string) attribute.KeyValue {
+	return OSBuildIDKey.String(val)
+}
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, like e.g. reported by `ver`
+// or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+	return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human-readable operating system name.
+func OSName(val string) attribute.KeyValue {
+	return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+	return OSVersionKey.String(val)
+}
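+
+// exampleOSAttrs is an illustrative sketch, not part of the generated
+// conventions; the values are the documented examples. It combines the
+// os.type enum value with the free-form name, version, and description
+// attributes.
+func exampleOSAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		OSTypeLinux,
+		OSName("Ubuntu"),
+		OSVersion("18.04.1"),
+		OSDescription("Ubuntu 18.04.1 LTS"),
+	}
+}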
+
+// An operating system process.
+const (
+	// ProcessCommandKey is the attribute Key conforming to the
+	// "process.command" semantic conventions. It represents the command used
+	// to launch the process (i.e. the command name). On Linux based systems,
+	// can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+	// be set to the first parameter extracted from `GetCommandLineW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'cmd/otelcol'
+	ProcessCommandKey = attribute.Key("process.command")
+
+	// ProcessCommandArgsKey is the attribute Key conforming to the
+	// "process.command_args" semantic conventions. It represents the all the
+	// command arguments (including the command/executable itself) as received
+	// by the process. On Linux-based systems (and some other Unixoid systems
+	// supporting procfs), can be set according to the list of null-delimited
+	// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+	// this would be the full argv vector passed to `main`.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'cmd/otelcol', '--config=config.yaml'
+	ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+	// ProcessCommandLineKey is the attribute Key conforming to the
+	// "process.command_line" semantic conventions. It represents the full
+	// command used to launch the process as a single string representing the
+	// full command. On Windows, can be set to the result of `GetCommandLineW`.
+	// Do not set this if you have to assemble it just for monitoring; use
+	// `process.command_args` instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
+	ProcessCommandLineKey = attribute.Key("process.command_line")
+
+	// ProcessExecutableNameKey is the attribute Key conforming to the
+	// "process.executable.name" semantic conventions. It represents the name
+	// of the process executable. On Linux based systems, can be set to the
+	// `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+	// of `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcol'
+	ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+	// ProcessExecutablePathKey is the attribute Key conforming to the
+	// "process.executable.path" semantic conventions. It represents the full
+	// path to the process executable. On Linux based systems, can be set to
+	// the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+	// `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/usr/bin/cmd/otelcol'
+	ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+	// ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+	// semantic conventions. It represents the username of the user that owns
+	// the process.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'root'
+	ProcessOwnerKey = attribute.Key("process.owner")
+
+	// ProcessParentPIDKey is the attribute Key conforming to the
+	// "process.parent_pid" semantic conventions. It represents the parent
+	// Process identifier (PPID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 111
+	ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+	// ProcessPIDKey is the attribute Key conforming to the "process.pid"
+	// semantic conventions. It represents the process identifier (PID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1234
+	ProcessPIDKey = attribute.Key("process.pid")
+
+	// ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+	// "process.runtime.description" semantic conventions. It represents an
+	// additional description about the runtime of the process, for example a
+	// specific vendor customization of the runtime environment.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+	ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+
+	// ProcessRuntimeNameKey is the attribute Key conforming to the
+	// "process.runtime.name" semantic conventions. It represents the name of
+	// the runtime of this process. For compiled native binaries, this SHOULD
+	// be the name of the compiler.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'OpenJDK Runtime Environment'
+	ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+	// ProcessRuntimeVersionKey is the attribute Key conforming to the
+	// "process.runtime.version" semantic conventions. It represents the
+	// version of the runtime of this process, as returned by the runtime
+	// without modification.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '14.0.2'
+	ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+)
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+	return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+	return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+	return ProcessCommandLineKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+	return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+	return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+	return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+	return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+	return ProcessPIDKey.Int(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+	return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+	return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+	return ProcessRuntimeVersionKey.String(val)
+}
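+
+// exampleProcessAttrs is an illustrative sketch, not part of the generated
+// conventions; the values are the documented examples. It describes a process
+// by its identifiers, executable, arguments, and owner.
+func exampleProcessAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		ProcessPID(1234),
+		ProcessParentPID(111),
+		ProcessExecutableName("otelcol"),
+		ProcessCommandArgs("cmd/otelcol", "--config=config.yaml"),
+		ProcessOwner("root"),
+	}
+}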
+
+// The Android platform on which the Android application is running.
+const (
+	// AndroidOSAPILevelKey is the attribute Key conforming to the
+	// "android.os.api_level" semantic conventions. It represents the uniquely
+	// identifies the framework API revision offered by a version
+	// (`os.version`) of the android operating system. More information can be
+	// found
+	// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '33', '32'
+	AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It represents the uniquely
+// identifies the framework API revision offered by a version (`os.version`) of
+// the android operating system. More information can be found
+// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+	return AndroidOSAPILevelKey.String(val)
+}
+
+// The web browser in which the application represented by the resource is
+// running. The `browser.*` attributes MUST be used only for resources that
+// represent applications running in a web browser (regardless of whether
+// running on a mobile or desktop device).
+const (
+	// BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+	// semantic conventions. It represents the array of brand name and version
+	// pairs, each pair separated by a space.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+	// Note: This value is intended to be taken from the [UA client hints
+	// API](https://wicg.github.io/ua-client-hints/#interface)
+	// (`navigator.userAgentData.brands`).
+	BrowserBrandsKey = attribute.Key("browser.brands")
+
+	// BrowserLanguageKey is the attribute Key conforming to the
+	// "browser.language" semantic conventions. It represents the preferred
+	// language of the user using the browser
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'en', 'en-US', 'fr', 'fr-FR'
+	// Note: This value is intended to be taken from the Navigator API
+	// `navigator.language`.
+	BrowserLanguageKey = attribute.Key("browser.language")
+
+	// BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+	// semantic conventions. It represents a boolean that is true if the
+	// browser is running on a mobile device
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: This value is intended to be taken from the [UA client hints
+	// API](https://wicg.github.io/ua-client-hints/#interface)
+	// (`navigator.userAgentData.mobile`). If unavailable, this attribute
+	// SHOULD be left unset.
+	BrowserMobileKey = attribute.Key("browser.mobile")
+
+	// BrowserPlatformKey is the attribute Key conforming to the
+	// "browser.platform" semantic conventions. It represents the platform on
+	// which the browser is running
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Windows', 'macOS', 'Android'
+	// Note: This value is intended to be taken from the [UA client hints
+	// API](https://wicg.github.io/ua-client-hints/#interface)
+	// (`navigator.userAgentData.platform`). If unavailable, the legacy
+	// `navigator.platform` API SHOULD NOT be used instead and this attribute
+	// SHOULD be left unset in order for the values to be consistent.
+	// The list of possible values is defined in the [W3C User-Agent Client
+	// Hints
+	// specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+	// Note that some (but not all) of these values can overlap with values in
+	// the [`os.type` and `os.name` attributes](./os.md). However, for
+	// consistency, the values in the `browser.platform` attribute should
+	// capture the exact value that the user agent provides.
+	BrowserPlatformKey = attribute.Key("browser.platform")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space
+func BrowserBrands(val ...string) attribute.KeyValue {
+	return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+	return BrowserLanguageKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+	return BrowserMobileKey.Bool(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+	return BrowserPlatformKey.String(val)
+}
+
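+// A minimal sketch of how these helpers compose into one attribute set; the
+// values are the placeholder examples from the comments above, not live UA
+// client hints data:
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.BrowserBrands("Chromium 99", "Chrome 99"),
+//		semconv.BrowserLanguage("en-US"),
+//		semconv.BrowserMobile(false),
+//		semconv.BrowserPlatform("macOS"),
+//	}
+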
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+	// AWSECSClusterARNKey is the attribute Key conforming to the
+	// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+	// [ECS
+	// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+	AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+	// AWSECSContainerARNKey is the attribute Key conforming to the
+	// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+	// Resource Name (ARN) of an [ECS container
+	// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+	AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+	// AWSECSLaunchtypeKey is the attribute Key conforming to the
+	// "aws.ecs.launchtype" semantic conventions. It represents the [launch
+	// type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+	// for an ECS task.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+	// AWSECSTaskARNKey is the attribute Key conforming to the
+	// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
+	// [ECS task
+	// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+	AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+	// AWSECSTaskFamilyKey is the attribute Key conforming to the
+	// "aws.ecs.task.family" semantic conventions. It represents the task
+	// definition family this task definition is a member of.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-family'
+	AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+	// AWSECSTaskRevisionKey is the attribute Key conforming to the
+	// "aws.ecs.task.revision" semantic conventions. It represents the revision
+	// for this task definition.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '8', '26'
+	AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+	// ec2
+	AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+	// fargate
+	AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
+// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+func AWSECSClusterARN(val string) attribute.KeyValue {
+	return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container
+// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+func AWSECSContainerARN(val string) attribute.KeyValue {
+	return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
+// task
+// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+func AWSECSTaskARN(val string) attribute.KeyValue {
+	return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the task
+// definition family this task definition is a member of.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+	return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// this task definition.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+	return AWSECSTaskRevisionKey.String(val)
+}
+
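+// A sketch of an ECS resource attribute set; the launch type uses one of the
+// enum values defined above (already an attribute.KeyValue), and the other
+// values are the placeholder examples from the comments:
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
+//		semconv.AWSECSLaunchtypeFargate,
+//		semconv.AWSECSTaskFamily("opentelemetry-family"),
+//		semconv.AWSECSTaskRevision("8"),
+//	}
+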
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+	// AWSEKSClusterARNKey is the attribute Key conforming to the
+	// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+	// EKS cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+	AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+	return AWSEKSClusterARNKey.String(val)
+}
+
+// Resources specific to Amazon Web Services.
+const (
+	// AWSLogGroupARNsKey is the attribute Key conforming to the
+	// "aws.log.group.arns" semantic conventions. It represents the Amazon
+	// Resource Name(s) (ARN) of the AWS log group(s).
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+	// Note: See the [log group ARN format
+	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+	AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+	// AWSLogGroupNamesKey is the attribute Key conforming to the
+	// "aws.log.group.names" semantic conventions. It represents the name(s) of
+	// the AWS log group(s) an application is writing to.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+	// Note: Multiple log groups must be supported for cases like
+	// multi-container applications, where a single application has sidecar
+	// containers, and each writes to its own log group.
+	AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+	// AWSLogStreamARNsKey is the attribute Key conforming to the
+	// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+	// the AWS log stream(s).
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+	// Note: See the [log stream ARN format
+	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+	// One log group can contain several log streams, so these ARNs necessarily
+	// identify both a log group and a log stream.
+	AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+
+	// AWSLogStreamNamesKey is the attribute Key conforming to the
+	// "aws.log.stream.names" semantic conventions. It represents the name(s)
+	// of the AWS log stream(s) an application is writing to.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+	AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+)
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+	return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+	return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+	return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+	return AWSLogStreamNamesKey.StringSlice(val)
+}
+
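+// These four helpers are variadic, mirroring the string[] type of the
+// underlying attributes; a sketch using the placeholder names from above:
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.AWSLogGroupNames("/aws/lambda/my-function", "opentelemetry-service"),
+//		semconv.AWSLogStreamNames("logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"),
+//	}
+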
+// Resource used by Google Cloud Run.
+const (
+	// GCPCloudRunJobExecutionKey is the attribute Key conforming to the
+	// "gcp.cloud_run.job.execution" semantic conventions. It represents the
+	// name of the Cloud Run
+	// [execution](https://cloud.google.com/run/docs/managing/job-executions)
+	// being run for the Job, as set by the
+	// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+	// environment variable.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'job-name-xxxx', 'sample-job-mdw84'
+	GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
+
+	// GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
+	// "gcp.cloud_run.job.task_index" semantic conventions. It represents the
+	// index for a task within an execution as provided by the
+	// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+	// environment variable.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 0, 1
+	GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
+)
+
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name
+// of the Cloud Run
+// [execution](https://cloud.google.com/run/docs/managing/job-executions) being
+// run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+	return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the
+// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+	return GCPCloudRunJobTaskIndexKey.Int(val)
+}
+
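+// Since both values come from the environment variables documented above, a
+// Cloud Run job could populate them roughly as follows (imports "os" and
+// "strconv" assumed; error handling kept minimal for the sketch):
+//
+//	exec := semconv.GCPCloudRunJobExecution(os.Getenv("CLOUD_RUN_EXECUTION"))
+//	if idx, err := strconv.Atoi(os.Getenv("CLOUD_RUN_TASK_INDEX")); err == nil {
+//		attrs := []attribute.KeyValue{exec, semconv.GCPCloudRunJobTaskIndex(idx)}
+//		_ = attrs
+//	}
+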
+// Resources used by Google Compute Engine (GCE).
+const (
+	// GCPGceInstanceHostnameKey is the attribute Key conforming to the
+	// "gcp.gce.instance.hostname" semantic conventions. It represents the
+	// hostname of a GCE instance. This is the full value of the default or
+	// [custom
+	// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'my-host1234.example.com',
+	// 'sample-vm.us-west1-b.c.my-project.internal'
+	GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
+
+	// GCPGceInstanceNameKey is the attribute Key conforming to the
+	// "gcp.gce.instance.name" semantic conventions. It represents the instance
+	// name of a GCE instance. This is the value provided by `host.name`, the
+	// visible name of the instance in the Cloud Console UI, and the prefix for
+	// the default hostname of the instance as defined by the [default internal
+	// DNS
+	// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'instance-1', 'my-vm-name'
+	GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name")
+)
+
+// GCPGceInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or [custom
+// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+func GCPGceInstanceHostname(val string) attribute.KeyValue {
+	return GCPGceInstanceHostnameKey.String(val)
+}
+
+// GCPGceInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance
+// name of a GCE instance. This is the value provided by `host.name`, the
+// visible name of the instance in the Cloud Console UI, and the prefix for the
+// default hostname of the instance as defined by the [default internal DNS
+// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+func GCPGceInstanceName(val string) attribute.KeyValue {
+	return GCPGceInstanceNameKey.String(val)
+}
+
+// Heroku dyno metadata
+const (
+	// HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+	// semantic conventions. It represents the unique identifier for the
+	// application
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+	HerokuAppIDKey = attribute.Key("heroku.app.id")
+
+	// HerokuReleaseCommitKey is the attribute Key conforming to the
+	// "heroku.release.commit" semantic conventions. It represents the commit
+	// hash for the current release
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+	HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+	// HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+	// "heroku.release.creation_timestamp" semantic conventions. It represents
+	// the time and date the release was created
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2022-10-23T18:00:42Z'
+	HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+)
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application
+func HerokuAppID(val string) attribute.KeyValue {
+	return HerokuAppIDKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+	return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+	return HerokuReleaseCreationTimestampKey.String(val)
+}
+
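+// On a dyno with the metadata feature enabled, these values are commonly
+// sourced from environment variables; the variable names below are an
+// assumption, not part of these conventions (import "os" assumed):
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.HerokuAppID(os.Getenv("HEROKU_APP_ID")),
+//		semconv.HerokuReleaseCommit(os.Getenv("HEROKU_SLUG_COMMIT")),
+//		semconv.HerokuReleaseCreationTimestamp(os.Getenv("HEROKU_RELEASE_CREATED_AT")),
+//	}
+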
+// The software deployment.
+const (
+	// DeploymentEnvironmentKey is the attribute Key conforming to the
+	// "deployment.environment" semantic conventions. It represents the name of
+	// the [deployment
+	// environment](https://wikipedia.org/wiki/Deployment_environment) (aka
+	// deployment tier).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'staging', 'production'
+	// Note: `deployment.environment` does not affect the uniqueness
+	// constraints defined through
+	// the `service.namespace`, `service.name` and `service.instance.id`
+	// resource attributes.
+	// This implies that resources carrying the following attribute
+	// combinations MUST be
+	// considered to be identifying the same service:
+	//
+	// * `service.name=frontend`, `deployment.environment=production`
+	// * `service.name=frontend`, `deployment.environment=staging`.
+	DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
+// (aka deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+	return DeploymentEnvironmentKey.String(val)
+}
+
+// A serverless instance.
+const (
+	// FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+	// semantic conventions. It represents the execution environment ID as a
+	// string, which will potentially be reused for other invocations of the
+	// same function/function version.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+	// Note: * **AWS Lambda:** Use the (full) log stream name.
+	FaaSInstanceKey = attribute.Key("faas.instance")
+
+	// FaaSMaxMemoryKey is the attribute Key conforming to the
+	// "faas.max_memory" semantic conventions. It represents the amount of
+	// memory available to the serverless function converted to Bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 134217728
+	// Note: It's recommended to set this attribute since e.g. too little
+	// memory can easily stop a Java AWS Lambda function from working
+	// correctly. On AWS Lambda, the environment variable
+	// `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+	// be multiplied by 1,048,576).
+	FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+	// FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+	// conventions. It represents the name of the single function that this
+	// runtime instance executes.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+	// Note: This is the name of the function as configured/deployed on the
+	// FaaS
+	// platform and is usually different from the name of the callback
+	// function (which may be stored in the
+	// [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
+	// span attributes).
+	//
+	// For some cloud providers, the above definition is ambiguous. The
+	// following
+	// definition of function name MUST be used for this attribute
+	// (and consequently the span name) for the listed cloud
+	// providers/products:
+	//
+	// * **Azure:**  The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+	//   followed by a forward slash followed by the function name (this form
+	//   can also be seen in the resource JSON for the function).
+	//   This means that a span attribute MUST be used, as an Azure function
+	//   app can host multiple functions that would usually share
+	//   a TracerProvider (see also the `cloud.resource_id` attribute).
+	FaaSNameKey = attribute.Key("faas.name")
+
+	// FaaSVersionKey is the attribute Key conforming to the "faas.version"
+	// semantic conventions. It represents the immutable version of the
+	// function being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '26', 'pinkfroid-00002'
+	// Note: Depending on the cloud provider and platform, use:
+	//
+	// * **AWS Lambda:** The [function
+	// version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+	//   (an integer represented as a decimal string).
+	// * **Google Cloud Run (Services):** The
+	// [revision](https://cloud.google.com/run/docs/managing/revisions)
+	//   (i.e., the function name plus the revision suffix).
+	// * **Google Cloud Functions:** The value of the
+	//   [`K_REVISION` environment
+	// variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+	// * **Azure Functions:** Not applicable. Do not set this attribute.
+	FaaSVersionKey = attribute.Key("faas.version")
+)
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string, which will potentially be reused for other
+// invocations of the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+	return FaaSInstanceKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+	return FaaSMaxMemoryKey.Int(val)
+}
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+	return FaaSNameKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+	return FaaSVersionKey.String(val)
+}
+
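+// Following the note on "faas.max_memory", an AWS Lambda function could
+// derive the value from AWS_LAMBDA_FUNCTION_MEMORY_SIZE (reported in MiB) by
+// multiplying by 1,048,576; a sketch (imports "os" and "strconv" assumed):
+//
+//	if mib, err := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE")); err == nil {
+//		kv := semconv.FaaSMaxMemory(mib * 1_048_576)
+//		_ = kv
+//	}
+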
+// A service instance.
+const (
+	// ServiceNameKey is the attribute Key conforming to the "service.name"
+	// semantic conventions. It represents the logical name of the service.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'shoppingcart'
+	// Note: MUST be the same for all instances of horizontally scaled
+	// services. If the value was not specified, SDKs MUST fall back to
+	// `unknown_service:` concatenated with
+	// [`process.executable.name`](process.md#process), e.g.
+	// `unknown_service:bash`. If `process.executable.name` is not available,
+	// the value MUST be set to `unknown_service`.
+	ServiceNameKey = attribute.Key("service.name")
+
+	// ServiceVersionKey is the attribute Key conforming to the
+	// "service.version" semantic conventions. It represents the version string
+	// of the service API or implementation. The format is not defined by these
+	// conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2.0.0', 'a01dbef8a'
+	ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+	return ServiceNameKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation. The format is not defined by these
+// conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+	return ServiceVersionKey.String(val)
+}
+
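+// A sketch of the fallback rule described in the "service.name" note,
+// deriving the executable name from os.Args (imports "os" and
+// "path/filepath" assumed; configuredServiceName is hypothetical):
+//
+//	name := configuredServiceName // hypothetical; e.g. read from your config
+//	if name == "" {
+//		if exe := filepath.Base(os.Args[0]); exe != "" && exe != "." {
+//			name = "unknown_service:" + exe
+//		} else {
+//			name = "unknown_service"
+//		}
+//	}
+//	kv := semconv.ServiceName(name)
+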
+// Additional identifying attributes of a service instance.
+const (
+	// ServiceInstanceIDKey is the attribute Key conforming to the
+	// "service.instance.id" semantic conventions. It represents the string ID
+	// of the service instance.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'my-k8s-pod-deployment-1',
+	// '627cc493-f310-47de-96bd-71410b7dec09'
+	// Note: MUST be unique for each instance of the same
+	// `service.namespace,service.name` pair (in other words
+	// `service.namespace,service.name,service.instance.id` triplet MUST be
+	// globally unique). The ID helps to distinguish instances of the same
+	// service that exist at the same time (e.g. instances of a horizontally
+	// scaled service). It is preferable for the ID to be persistent and stay
+	// the same for the lifetime of the service instance, however it is
+	// acceptable that the ID is ephemeral and changes during important
+	// lifetime events for the service (e.g. service restarts). If the service
+	// has no inherent unique ID that can be used as the value of this
+	// attribute it is recommended to generate a random Version 1 or Version 4
+	// RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+	// Version 5, see RFC 4122 for more recommendations).
+	ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+	// ServiceNamespaceKey is the attribute Key conforming to the
+	// "service.namespace" semantic conventions. It represents a namespace for
+	// `service.name`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Shop'
+	// Note: A string value having a meaning that helps to distinguish a group
+	// of services, for example the team name that owns a group of services.
+	// `service.name` is expected to be unique within the same namespace. If
+	// `service.namespace` is not specified in the Resource then `service.name`
+	// is expected to be unique for all services that have no explicit
+	// namespace defined (so the empty/unspecified namespace is simply one more
+	// valid namespace). A zero-length namespace string is assumed equal to an
+	// unspecified namespace.
+	ServiceNamespaceKey = attribute.Key("service.namespace")
+)
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+	return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+	return ServiceNamespaceKey.String(val)
+}
+
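+// Per the note on "service.instance.id", a random RFC 4122 version 4 UUID is
+// a reasonable instance ID when nothing more stable exists; a sketch,
+// assuming the github.com/google/uuid package is available:
+//
+//	kv := semconv.ServiceInstanceID(uuid.NewString())
+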
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+	// TelemetrySDKLanguageKey is the attribute Key conforming to the
+	// "telemetry.sdk.language" semantic conventions. It represents the
+	// language of the telemetry SDK.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+	// TelemetrySDKNameKey is the attribute Key conforming to the
+	// "telemetry.sdk.name" semantic conventions. It represents the name of the
+	// telemetry SDK as defined above.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	// Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
+	// to `opentelemetry`.
+	// If another SDK, like a fork or a vendor-provided implementation, is
+	// used, this SDK MUST set the
+	// `telemetry.sdk.name` attribute to the fully-qualified class or module
+	// name of this SDK's main entry point
+	// or another suitable identifier depending on the language.
+	// The identifier `opentelemetry` is reserved and MUST NOT be used in this
+	// case.
+	// All custom identifiers SHOULD be stable across different versions of an
+	// implementation.
+	TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+	// TelemetrySDKVersionKey is the attribute Key conforming to the
+	// "telemetry.sdk.version" semantic conventions. It represents the version
+	// string of the telemetry SDK.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: '1.2.3'
+	TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+)
+
+var (
+	// cpp
+	TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+	// dotnet
+	TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+	// erlang
+	TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+	// go
+	TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+	// java
+	TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+	// nodejs
+	TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+	// php
+	TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+	// python
+	TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+	// ruby
+	TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+	// rust
+	TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
+	// swift
+	TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+	// webjs
+	TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+)
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+	return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+	return TelemetrySDKVersionKey.String(val)
+}
+
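+// A Go SDK would describe itself with the language enum value below plus the
+// two string helpers; the version shown is a placeholder:
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.TelemetrySDKLanguageGo,
+//		semconv.TelemetrySDKName("opentelemetry"),
+//		semconv.TelemetrySDKVersion("1.2.3"),
+//	}
+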
+// The auto instrumentation agent or distribution, if used to capture the
+// data recorded by the instrumentation libraries.
+const (
+	// TelemetryDistroNameKey is the attribute Key conforming to the
+	// "telemetry.distro.name" semantic conventions. It represents the name of
+	// the auto instrumentation agent or distribution, if used.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'parts-unlimited-java'
+	// Note: Official auto instrumentation agents and distributions SHOULD set
+	// the `telemetry.distro.name` attribute to
+	// a string starting with `opentelemetry-`, e.g.
+	// `opentelemetry-java-instrumentation`.
+	TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
+
+	// TelemetryDistroVersionKey is the attribute Key conforming to the
+	// "telemetry.distro.version" semantic conventions. It represents the
+	// version string of the auto instrumentation agent or distribution, if
+	// used.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1.2.3'
+	TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
+)
+
+// TelemetryDistroName returns an attribute KeyValue conforming to the
+// "telemetry.distro.name" semantic conventions. It represents the name of the
+// auto instrumentation agent or distribution, if used.
+func TelemetryDistroName(val string) attribute.KeyValue {
+	return TelemetryDistroNameKey.String(val)
+}
+
+// TelemetryDistroVersion returns an attribute KeyValue conforming to the
+// "telemetry.distro.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent or distribution, if used.
+func TelemetryDistroVersion(val string) attribute.KeyValue {
+	return TelemetryDistroVersionKey.String(val)
+}
+
+// Resource describing the packaged software running the application code. Web
+// engines are typically executed using process.runtime.
+const (
+	// WebEngineDescriptionKey is the attribute Key conforming to the
+	// "webengine.description" semantic conventions. It represents the
+	// additional description of the web engine (e.g. detailed version and
+	// edition information).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+	// 2.2.2.Final'
+	WebEngineDescriptionKey = attribute.Key("webengine.description")
+
+	// WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+	// semantic conventions. It represents the name of the web engine.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'WildFly'
+	WebEngineNameKey = attribute.Key("webengine.name")
+
+	// WebEngineVersionKey is the attribute Key conforming to the
+	// "webengine.version" semantic conventions. It represents the version of
+	// the web engine.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '21.0.0'
+	WebEngineVersionKey = attribute.Key("webengine.version")
+)
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+	return WebEngineDescriptionKey.String(val)
+}
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+	return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+	return WebEngineVersionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+	// OTelScopeNameKey is the attribute Key conforming to the
+	// "otel.scope.name" semantic conventions. It represents the name of the
+	// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'io.opentelemetry.contrib.mongodb'
+	OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+	// OTelScopeVersionKey is the attribute Key conforming to the
+	// "otel.scope.version" semantic conventions. It represents the version of
+	// the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1.0.0'
+	OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+	return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+	return OTelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+	// OTelLibraryNameKey is the attribute Key conforming to the
+	// "otel.library.name" semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'io.opentelemetry.contrib.mongodb'
+	// Deprecated: use the `otel.scope.name` attribute.
+	OTelLibraryNameKey = attribute.Key("otel.library.name")
+
+	// OTelLibraryVersionKey is the attribute Key conforming to the
+	// "otel.library.version" semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '1.0.0'
+	// Deprecated: use the `otel.scope.version` attribute.
+	OTelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OTelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions.
+//
+// Deprecated: use the `otel.scope.name` attribute.
+func OTelLibraryName(val string) attribute.KeyValue {
+	return OTelLibraryNameKey.String(val)
+}
+
+// OTelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions.
+//
+// Deprecated: use the `otel.scope.version` attribute.
+func OTelLibraryVersion(val string) attribute.KeyValue {
+	return OTelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go
new file mode 100644
index 0000000000000000000000000000000000000000..9733ce888a049b9f402799dea3529af0fc6807a3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.24.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go
new file mode 100644
index 0000000000000000000000000000000000000000..397174818b7aabf6a48ec25aa2cbd6c67372699f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go
@@ -0,0 +1,1334 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Operations that access some remote service.
+const (
+	// PeerServiceKey is the attribute Key conforming to the "peer.service"
+	// semantic conventions. It represents the
+	// [`service.name`](/docs/resource/README.md#service) of the remote
+	// service. SHOULD be equal to the actual `service.name` resource attribute
+	// of the remote service if any.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'AuthTokenCache'
+	PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](/docs/resource/README.md#service) of the remote service.
+// SHOULD be equal to the actual `service.name` resource attribute of the
+// remote service if any.
+func PeerService(val string) attribute.KeyValue {
+	return PeerServiceKey.String(val)
+}
+
+// These attributes may be used for any operation with an authenticated and/or
+// authorized enduser.
+const (
+	// EnduserIDKey is the attribute Key conforming to the "enduser.id"
+	// semantic conventions. It represents the username or client_id extracted
+	// from the access token or
+	// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+	// in the inbound request from outside the system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'username'
+	EnduserIDKey = attribute.Key("enduser.id")
+
+	// EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+	// semantic conventions. It represents the actual/assumed role the client
+	// is making the request under extracted from token or application security
+	// context.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'admin'
+	EnduserRoleKey = attribute.Key("enduser.role")
+
+	// EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+	// semantic conventions. It represents the scopes or granted authorities
+	// the client currently possesses extracted from token or application
+	// security context. The value would come from the scope associated with an
+	// [OAuth 2.0 Access
+	// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+	// value in a [SAML 2.0
+	// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'read:message, write:files'
+	EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+	return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+	return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+	return EnduserScopeKey.String(val)
+}
+
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+	// CodeColumnKey is the attribute Key conforming to the "code.column"
+	// semantic conventions. It represents the column number in `code.filepath`
+	// best representing the operation. It SHOULD point within the code unit
+	// named in `code.function`.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 16
+	CodeColumnKey = attribute.Key("code.column")
+
+	// CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+	// semantic conventions. It represents the source code file name that
+	// identifies the code unit as uniquely as possible (preferably an absolute
+	// file path).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/usr/local/MyApplication/content_root/app/index.php'
+	CodeFilepathKey = attribute.Key("code.filepath")
+
+	// CodeFunctionKey is the attribute Key conforming to the "code.function"
+	// semantic conventions. It represents the method or function name, or
+	// equivalent (usually rightmost part of the code unit's name).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'serveRequest'
+	CodeFunctionKey = attribute.Key("code.function")
+
+	// CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+	// semantic conventions. It represents the line number in `code.filepath`
+	// best representing the operation. It SHOULD point within the code unit
+	// named in `code.function`.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 42
+	CodeLineNumberKey = attribute.Key("code.lineno")
+
+	// CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+	// semantic conventions. It represents the "namespace" within which
+	// `code.function` is defined. Usually the qualified class or module name,
+	// such that `code.namespace` + some separator + `code.function` form a
+	// unique identifier for the code unit.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'com.example.MyHTTPService'
+	CodeNamespaceKey = attribute.Key("code.namespace")
+
+	// CodeStacktraceKey is the attribute Key conforming to the
+	// "code.stacktrace" semantic conventions. It represents a stacktrace as a
+	// string in the natural representation for the language runtime. The
+	// representation is to be determined and documented by each language SIG.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n
+	// at com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n
+	// at com.example.GenerateTrace.main(GenerateTrace.java:5)'
+	CodeStacktraceKey = attribute.Key("code.stacktrace")
+)
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+	return CodeColumnKey.Int(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+	return CodeFilepathKey.String(val)
+}
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+	return CodeFunctionKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+	return CodeLineNumberKey.Int(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+	return CodeNamespaceKey.String(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func CodeStacktrace(val string) attribute.KeyValue {
+	return CodeStacktraceKey.String(val)
+}
+
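+// One way to fill these at a call site is the standard runtime package; note
+// that runtime.FuncForPC returns a fully qualified name, so splitting it
+// into code.namespace and code.function is left to the caller (import
+// "runtime" assumed):
+//
+//	if pc, file, line, ok := runtime.Caller(0); ok {
+//		attrs := []attribute.KeyValue{
+//			semconv.CodeFilepath(file),
+//			semconv.CodeLineNumber(line),
+//		}
+//		if fn := runtime.FuncForPC(pc); fn != nil {
+//			attrs = append(attrs, semconv.CodeFunction(fn.Name()))
+//		}
+//		_ = attrs
+//	}
+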
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+	// ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+	// conventions. It represents the current "managed" thread ID (as opposed
+	// to OS thread ID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 42
+	ThreadIDKey = attribute.Key("thread.id")
+
+	// ThreadNameKey is the attribute Key conforming to the "thread.name"
+	// semantic conventions. It represents the current thread name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'main'
+	ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+	return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+	return ThreadNameKey.String(val)
+}
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+	// AWSLambdaInvokedARNKey is the attribute Key conforming to the
+	// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+	// invoked ARN as provided on the `Context` passed to the function
+	// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+	// `/runtime/invocation/next` endpoint, as applicable).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+	// Note: This may be different from `cloud.resource_id` if an alias is
+	// involved.
+	AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` endpoint, as applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+	return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+	// CloudeventsEventIDKey is the attribute Key conforming to the
+	// "cloudevents.event_id" semantic conventions. It represents the
+	// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+	// which uniquely identifies the event.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+	CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+	// CloudeventsEventSourceKey is the attribute Key conforming to the
+	// "cloudevents.event_source" semantic conventions. It represents the
+	// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+	// which identifies the context in which an event happened.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'https://github.com/cloudevents',
+	// '/cloudevents/spec/pull/123', 'my-service'
+	CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+	// CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+	// "cloudevents.event_spec_version" semantic conventions. It represents the
+	// [version of the CloudEvents
+	// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+	// which the event uses.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1.0'
+	CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+	// CloudeventsEventSubjectKey is the attribute Key conforming to the
+	// "cloudevents.event_subject" semantic conventions. It represents the
+	// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+	// of the event in the context of the event producer (identified by
+	// source).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'mynewfile.jpg'
+	CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+	// CloudeventsEventTypeKey is the attribute Key conforming to the
+	// "cloudevents.event_type" semantic conventions. It represents the
+	// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+	// which contains a value describing the type of event related to the
+	// originating occurrence.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'com.github.pull_request.opened',
+	// 'com.example.object.deleted.v2'
+	CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+	return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+	return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+	return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+	return CloudeventsEventSubjectKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+	return CloudeventsEventTypeKey.String(val)
+}
+
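+// Example (illustrative sketch, not generated code): assuming a started span
+// and this package imported as semconv, the CloudEvents helpers above could
+// be attached like so; the event values are placeholders.
+//
+//	span.SetAttributes(
+//		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		semconv.CloudeventsEventSource("/cloudevents/spec/pull/123"),
+//		semconv.CloudeventsEventType("com.github.pull_request.opened"),
+//	)
+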
+// Semantic conventions for the OpenTracing Shim
+const (
+	// OpentracingRefTypeKey is the attribute Key conforming to the
+	// "opentracing.ref_type" semantic conventions. It represents the
+	// parent-child Reference type
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The causal relationship between a child Span and a parent Span.
+	OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+	// The parent Span depends on the child Span in some capacity
+	OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+	// The parent Span doesn't depend in any way on the result of the child Span
+	OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
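+// Example (illustrative sketch): a shim layer recording the OpenTracing
+// reference type on a span it created; the span variable is assumed to exist.
+//
+//	span.SetAttributes(OpentracingRefTypeChildOf)
+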
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
+// concepts.
+const (
+	// OTelStatusCodeKey is the attribute Key conforming to the
+	// "otel.status_code" semantic conventions. It represents the name of the
+	// code, either "OK" or "ERROR". MUST NOT be set if the status code is
+	// UNSET.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+	// OTelStatusDescriptionKey is the attribute Key conforming to the
+	// "otel.status_description" semantic conventions. It represents the
+	// description of the Status if it has a value; otherwise it is not set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'resource not found'
+	OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+	// The operation has been validated by an Application developer or Operator to have completed successfully
+	OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+	// The operation contains an error
+	OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value; otherwise it is not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+	return OTelStatusDescriptionKey.String(val)
+}
+
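+// Example (illustrative sketch): a non-OTLP exporter might translate a span's
+// error status into these attributes; the description value is a placeholder.
+//
+//	attrs := []attribute.KeyValue{
+//		OTelStatusCodeError,
+//		OTelStatusDescription("resource not found"),
+//	}
+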
+// This semantic convention describes an instance of a function that runs
+// without provisioning or managing of servers (also known as serverless
+// functions or Function as a Service (FaaS)) with spans.
+const (
+	// FaaSInvocationIDKey is the attribute Key conforming to the
+	// "faas.invocation_id" semantic conventions. It represents the invocation
+	// ID of the current function invocation.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+	FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+)
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+	return FaaSInvocationIDKey.String(val)
+}
+
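+// Example (illustrative sketch): tagging a handler span with the invocation
+// ID, e.g. taken from the FaaS runtime's request context (value assumed).
+//
+//	span.SetAttributes(FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"))
+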
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+	// FaaSDocumentCollectionKey is the attribute Key conforming to the
+	// "faas.document.collection" semantic conventions. It represents the name
+	// of the source on which the triggering operation was performed. For
+	// example, in Cloud Storage or S3 corresponds to the bucket name, and in
+	// Cosmos DB to the database name.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'myBucketName', 'myDBName'
+	FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+	// FaaSDocumentNameKey is the attribute Key conforming to the
+	// "faas.document.name" semantic conventions. It represents the document
+	// name/table subjected to the operation. For example, in Cloud Storage or
+	// S3 is the name of the file, and in Cosmos DB the table name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myFile.txt', 'myTableName'
+	FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+	// FaaSDocumentOperationKey is the attribute Key conforming to the
+	// "faas.document.operation" semantic conventions. It describes the
+	// type of the operation that was performed on the data.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+	// FaaSDocumentTimeKey is the attribute Key conforming to the
+	// "faas.document.time" semantic conventions. It represents a string
+	// containing the time when the data was accessed in the [ISO
+	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2020-01-23T13:47:06Z'
+	FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+)
+
+var (
+	// When a new object is created
+	FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+	// When an object is modified
+	FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+	// When an object is deleted
+	FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
+// database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+	return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+	return FaaSDocumentNameKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+	return FaaSDocumentTimeKey.String(val)
+}
+
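+// Example (illustrative sketch): attributes for a span describing an
+// object-store-triggered invocation; bucket and object names are placeholders.
+//
+//	span.SetAttributes(
+//		FaaSDocumentCollection("myBucketName"),
+//		FaaSDocumentName("myFile.txt"),
+//		FaaSDocumentOperationInsert,
+//		FaaSDocumentTime(time.Now().UTC().Format(time.RFC3339)),
+//	)
+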
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+	// FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+	// conventions. It represents a string containing the schedule period as
+	// [Cron
+	// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '0/5 * * * ? *'
+	FaaSCronKey = attribute.Key("faas.cron")
+
+	// FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+	// conventions. It represents a string containing the function invocation
+	// time in the [ISO
+	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2020-01-23T13:47:06Z'
+	FaaSTimeKey = attribute.Key("faas.time")
+)
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+	return FaaSCronKey.String(val)
+}
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+	return FaaSTimeKey.String(val)
+}
+
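+// Example (illustrative sketch): attributes for a timer-triggered invocation;
+// the schedule and timestamp values are placeholders.
+//
+//	span.SetAttributes(
+//		FaaSCron("0/5 * * * ? *"),
+//		FaaSTime("2020-01-23T13:47:06Z"),
+//	)
+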
+// Contains additional attributes for incoming FaaS spans.
+const (
+	// FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+	// semantic conventions. It represents a boolean that is true if the
+	// serverless function is executed for the first time (aka cold-start).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+	return FaaSColdstartKey.Bool(val)
+}
+
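+// Example (illustrative sketch): marking the first invocation of a function
+// instance; isFirstInvocation stands in for runtime state the caller tracks.
+//
+//	span.SetAttributes(FaaSColdstart(isFirstInvocation))
+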
+// The `aws` conventions apply to operations using the AWS SDK. They map
+// request or response parameters in AWS SDK API calls to attributes on a Span.
+// The conventions have been collected over time based on feedback from AWS
+// users of tracing and will continue to evolve as new interesting conventions
+// are found.
+// Some descriptions are also provided for populating general OpenTelemetry
+// semantic conventions based on these APIs.
+const (
+	// AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+	// semantic conventions. It represents the AWS request ID as returned in
+	// the response headers `x-amz-request-id` or `x-amz-requestid`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
+	AWSRequestIDKey = attribute.Key("aws.request_id")
+)
+
+// AWSRequestID returns an attribute KeyValue conforming to the
+// "aws.request_id" semantic conventions. It represents the AWS request ID as
+// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
+func AWSRequestID(val string) attribute.KeyValue {
+	return AWSRequestIDKey.String(val)
+}
+
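+// Example (illustrative sketch): recording the request ID from an AWS SDK
+// response; resp and its header access are placeholders for the caller's code.
+//
+//	span.SetAttributes(AWSRequestID(resp.Header.Get("x-amz-request-id")))
+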
+// Attributes that exist for multiple DynamoDB request types.
+const (
+	// AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+	// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+	// value of the `AttributesToGet` request parameter.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'lives', 'id'
+	AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+	// AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+	// "aws.dynamodb.consistent_read" semantic conventions. It represents the
+	// value of the `ConsistentRead` request parameter.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+	// AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+	// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+	// JSON-serialized value of each item in the `ConsumedCapacity` response
+	// field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+	// "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+	// { "CapacityUnits": number, "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+	// { "CapacityUnits": number, "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number }, "TableName": "string",
+	// "WriteCapacityUnits": number }'
+	AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+	// AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+	// "aws.dynamodb.index_name" semantic conventions. It represents the value
+	// of the `IndexName` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'name_to_group'
+	AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+	// AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+	// the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+	// represents the JSON-serialized value of the `ItemCollectionMetrics`
+	// response field.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+	// blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+	// "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+	// "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+	// "SizeEstimateRangeGB": [ number ] } ] }'
+	AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+	// AWSDynamoDBLimitKey is the attribute Key conforming to the
+	// "aws.dynamodb.limit" semantic conventions. It represents the value of
+	// the `Limit` request parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 10
+	AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+	// AWSDynamoDBProjectionKey is the attribute Key conforming to the
+	// "aws.dynamodb.projection" semantic conventions. It represents the value
+	// of the `ProjectionExpression` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+	// RelatedItems, ProductReviews'
+	AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+	// AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+	// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+	// represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+	// request parameter.
+	//
+	// Type: double
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1.0, 2.0
+	AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+	// AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+	// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+	// It represents the value of the
+	// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+	//
+	// Type: double
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1.0, 2.0
+	AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+	// AWSDynamoDBSelectKey is the attribute Key conforming to the
+	// "aws.dynamodb.select" semantic conventions. It represents the value of
+	// the `Select` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ALL_ATTRIBUTES', 'COUNT'
+	AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+
+	// AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+	// "aws.dynamodb.table_names" semantic conventions. It represents the keys
+	// in the `RequestItems` object field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Users', 'Cats'
+	AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+)
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+	return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+	return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+	return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+	return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+	return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+	return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+	return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+	return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+	return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+	return AWSDynamoDBSelectKey.String(val)
+}
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+	return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
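+// Example (illustrative sketch): annotating a span around a DynamoDB read;
+// the table, flag, and projection values are placeholders mirroring request
+// parameters.
+//
+//	span.SetAttributes(
+//		AWSDynamoDBTableNames("Users"),
+//		AWSDynamoDBConsistentRead(true),
+//		AWSDynamoDBProjection("Title, Price, Color"),
+//	)
+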
+// DynamoDB.CreateTable
+const (
+	// AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+	// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+	// represents the JSON-serialized value of each item of the
+	// `GlobalSecondaryIndexes` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+	// "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+	// "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+	// "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+	AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+	// AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+	// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+	// represents the JSON-serialized value of each item of the
+	// `LocalSecondaryIndexes` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '{ "IndexARN": "string", "IndexName": "string",
+	// "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+	AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+	return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+	return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+	// AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+	// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+	// the value of the `ExclusiveStartTableName` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Users', 'CatsTable'
+	AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+	// AWSDynamoDBTableCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.table_count" semantic conventions. It represents the
+	// number of items in the `TableNames` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 20
+	AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+	return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+	return AWSDynamoDBTableCountKey.Int(val)
+}
+
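+// Example (illustrative sketch): attributes for a DynamoDB ListTables span;
+// out stands in for the SDK response value.
+//
+//	span.SetAttributes(
+//		AWSDynamoDBExclusiveStartTable("Users"),
+//		AWSDynamoDBTableCount(len(out.TableNames)),
+//	)
+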
+// DynamoDB.Query
+const (
+	// AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+	// "aws.dynamodb.scan_forward" semantic conventions. It represents the
+	// value of the `ScanIndexForward` request parameter.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+	return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// DynamoDB.Scan
+const (
+	// AWSDynamoDBCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.count" semantic conventions. It represents the value of
+	// the `Count` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 10
+	AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+	// AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.scanned_count" semantic conventions. It represents the
+	// value of the `ScannedCount` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 50
+	AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+
+	// AWSDynamoDBSegmentKey is the attribute Key conforming to the
+	// "aws.dynamodb.segment" semantic conventions. It represents the value of
+	// the `Segment` request parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 10
+	AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+	// AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+	// "aws.dynamodb.total_segments" semantic conventions. It represents the
+	// value of the `TotalSegments` request parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 100
+	AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+)
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+	return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+	return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+	return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+	return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
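+// Example (illustrative sketch): attributes for one segment of a parallel
+// DynamoDB Scan; the numeric values are placeholders taken from the request
+// and response.
+//
+//	span.SetAttributes(
+//		AWSDynamoDBSegment(2),
+//		AWSDynamoDBTotalSegments(100),
+//		AWSDynamoDBCount(10),
+//		AWSDynamoDBScannedCount(50),
+//	)
+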
+// DynamoDB.UpdateTable
+const (
+	// AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+	// the "aws.dynamodb.attribute_definitions" semantic conventions. It
+	// represents the JSON-serialized value of each item in the
+	// `AttributeDefinitions` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+	AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+	// AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+	// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+	// conventions. It represents the JSON-serialized value of each item in
+	// the `GlobalSecondaryIndexUpdates` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+	// "ProvisionedThroughput": { "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number } }'
+	AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+	return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+	return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Attributes that exist for S3 request types.
+const (
+	// AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+	// semantic conventions. It represents the S3 bucket name the request
+	// refers to. Corresponds to the `--bucket` parameter of the [S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+	// operations.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'some-bucket-name'
+	// Note: The `bucket` attribute is applicable to all S3 operations that
+	// reference a bucket, i.e. that require the bucket name as a mandatory
+	// parameter.
+	// This applies to almost all S3 operations except `list-buckets`.
+	AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+	// AWSS3CopySourceKey is the attribute Key conforming to the
+	// "aws.s3.copy_source" semantic conventions. It represents the source
+	// object (in the form `bucket`/`key`) for the copy operation.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'someFile.yml'
+	// Note: The `copy_source` attribute applies to S3 copy operations and
+	// corresponds to the `--copy-source` parameter
+	// of the [copy-object operation within the S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
+	// This applies in particular to the following operations:
+	//
+	// -
+	// [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+	// -
+	// [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+	AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+	// AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+	// semantic conventions. It represents the delete request container that
+	// specifies the objects to be deleted.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
+	// Note: The `delete` attribute is only applicable to the
+	// [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+	// operation.
+	// The `delete` attribute corresponds to the `--delete` parameter of the
+	// [delete-objects operation within the S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
+	AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+	// AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+	// conventions. It represents the S3 object key the request refers to.
+	// Corresponds to the `--key` parameter of the [S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+	// operations.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'someFile.yml'
+	// Note: The `key` attribute is applicable to all object-related S3
+	// operations, i.e. that require the object key as a mandatory parameter.
+	// This applies in particular to the following operations:
+	//
+	// -
+	// [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+	// -
+	// [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+	// -
+	// [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
+	// -
+	// [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
+	// -
+	// [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
+	// -
+	// [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
+	// -
+	// [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
+	// -
+	// [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+	// -
+	// [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+	// -
+	// [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
+	// -
+	// [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+	// -
+	// [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+	// -
+	// [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+	AWSS3KeyKey = attribute.Key("aws.s3.key")
+
+	// AWSS3PartNumberKey is the attribute Key conforming to the
+	// "aws.s3.part_number" semantic conventions. It represents the part number
+	// of the part being uploaded in a multipart-upload operation. This is a
+	// positive integer between 1 and 10,000.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3456
+	// Note: The `part_number` attribute is only applicable to the
+	// [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+	// and
+	// [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+	// operations.
+	// The `part_number` attribute corresponds to the `--part-number` parameter
+	// of the
+	// [upload-part operation within the S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
+	AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+
+	// AWSS3UploadIDKey is the attribute Key conforming to the
+	// "aws.s3.upload_id" semantic conventions. It represents the upload ID
+	// that identifies the multipart upload.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
+	// Note: The `upload_id` attribute applies to S3 multipart-upload
+	// operations and corresponds to the `--upload-id` parameter
+	// of the [S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+	// multipart operations.
+	// This applies in particular to the following operations:
+	//
+	// -
+	// [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+	// -
+	// [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+	// -
+	// [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+	// -
+	// [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+	// -
+	// [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+	AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+)
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the
+// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
+// request refers to. Corresponds to the `--bucket` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Bucket(val string) attribute.KeyValue {
+	return AWSS3BucketKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object
+// (in the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+	return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the
+// "aws.s3.delete" semantic conventions. It represents the delete request
+// container that specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+	return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
+// semantic conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Key(val string) attribute.KeyValue {
+	return AWSS3KeyKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+	return AWSS3PartNumberKey.Int(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+	return AWSS3UploadIDKey.String(val)
+}
+
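+// Example (illustrative sketch): attributes for an S3 upload-part span; the
+// bucket, key, upload ID, and part number values are placeholders.
+//
+//	span.SetAttributes(
+//		AWSS3Bucket("some-bucket-name"),
+//		AWSS3Key("someFile.yml"),
+//		AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
+//		AWSS3PartNumber(3456),
+//	)
+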
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+	// GraphqlDocumentKey is the attribute Key conforming to the
+	// "graphql.document" semantic conventions. It represents the GraphQL
+	// document being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+	// Note: The value may be sanitized to exclude sensitive information.
+	GraphqlDocumentKey = attribute.Key("graphql.document")
+
+	// GraphqlOperationNameKey is the attribute Key conforming to the
+	// "graphql.operation.name" semantic conventions. It represents the name of
+	// the operation being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'findBookByID'
+	GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+	// GraphqlOperationTypeKey is the attribute Key conforming to the
+	// "graphql.operation.type" semantic conventions. It represents the type of
+	// the operation being executed.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'query', 'mutation', 'subscription'
+	GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+var (
+	// GraphQL query
+	GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+	// GraphQL mutation
+	GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+	// GraphQL subscription
+	GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+	return GraphqlDocumentKey.String(val)
+}
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+	return GraphqlOperationNameKey.String(val)
+}
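+
+// Example (illustrative sketch): attributes for a span around GraphQL query
+// execution; the operation name and document text are placeholders.
+//
+//	span.SetAttributes(
+//		GraphqlOperationTypeQuery,
+//		GraphqlOperationName("findBookByID"),
+//		GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
+//	)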
diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go
new file mode 100644
index 0000000000000000000000000000000000000000..caf7249de859b8edcdf07d3996fbc901e23e1ca6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace.go
@@ -0,0 +1,47 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Tracer creates a named tracer that implements the Tracer interface.
+// If the name is an empty string, the provider uses a default name.
+//
+// This is short for GetTracerProvider().Tracer(name, opts...)
+func Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+	return GetTracerProvider().Tracer(name, opts...)
+}
+
+// GetTracerProvider returns the registered global trace provider.
+// If none is registered, an instance of NoopTracerProvider is returned.
+//
+// Use the trace provider to create a named tracer. E.g.
+//
+//	tracer := otel.GetTracerProvider().Tracer("example.com/foo")
+//
+// or
+//
+//	tracer := otel.Tracer("example.com/foo")
+func GetTracerProvider() trace.TracerProvider {
+	return global.TracerProvider()
+}
+
+// SetTracerProvider registers `tp` as the global trace provider.
+func SetTracerProvider(tp trace.TracerProvider) {
+	global.SetTracerProvider(tp)
+}
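+
+// Example (illustrative sketch, not part of this file): wiring a concrete
+// provider at application startup. sdktrace refers to the separate
+// go.opentelemetry.io/otel/sdk/trace module, which is an assumption here.
+//
+//	tp := sdktrace.NewTracerProvider()
+//	otel.SetTracerProvider(tp)
+//	tracer := otel.Tracer("example.com/foo")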
diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go
new file mode 100644
index 0000000000000000000000000000000000000000..3aadc66cf7a7bee5cb0cd989c884b603d47205ed
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/config.go
@@ -0,0 +1,334 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+// TracerConfig is a group of options for a Tracer.
+type TracerConfig struct {
+	instrumentationVersion string
+	// Schema URL of the telemetry emitted by the Tracer.
+	schemaURL string
+	attrs     attribute.Set
+}
+
+// InstrumentationVersion returns the version of the library providing instrumentation.
+func (t *TracerConfig) InstrumentationVersion() string {
+	return t.instrumentationVersion
+}
+
+// InstrumentationAttributes returns the attributes associated with the library
+// providing instrumentation.
+func (t *TracerConfig) InstrumentationAttributes() attribute.Set {
+	return t.attrs
+}
+
+// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer.
+func (t *TracerConfig) SchemaURL() string {
+	return t.schemaURL
+}
+
+// NewTracerConfig applies all the options to a returned TracerConfig.
+func NewTracerConfig(options ...TracerOption) TracerConfig {
+	var config TracerConfig
+	for _, option := range options {
+		config = option.apply(config)
+	}
+	return config
+}
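+
+// An illustrative sketch (not upstream code; option values are examples):
+// resolving options into a TracerConfig and reading the result back.
+//
+//	cfg := trace.NewTracerConfig(
+//		trace.WithInstrumentationVersion("v0.1.0"),
+//		trace.WithSchemaURL("https://opentelemetry.io/schemas/1.21.0"),
+//	)
+//	cfg.InstrumentationVersion() // "v0.1.0"
+//	cfg.SchemaURL()              // "https://opentelemetry.io/schemas/1.21.0"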
+
+// TracerOption applies an option to a TracerConfig.
+type TracerOption interface {
+	apply(TracerConfig) TracerConfig
+}
+
+type tracerOptionFunc func(TracerConfig) TracerConfig
+
+func (fn tracerOptionFunc) apply(cfg TracerConfig) TracerConfig {
+	return fn(cfg)
+}
+
+// SpanConfig is a group of options for a Span.
+type SpanConfig struct {
+	attributes []attribute.KeyValue
+	timestamp  time.Time
+	links      []Link
+	newRoot    bool
+	spanKind   SpanKind
+	stackTrace bool
+}
+
+// Attributes describe the associated qualities of a Span.
+func (cfg *SpanConfig) Attributes() []attribute.KeyValue {
+	return cfg.attributes
+}
+
+// Timestamp is a time in a Span life-cycle.
+func (cfg *SpanConfig) Timestamp() time.Time {
+	return cfg.timestamp
+}
+
+// StackTrace checks whether stack trace capturing is enabled.
+func (cfg *SpanConfig) StackTrace() bool {
+	return cfg.stackTrace
+}
+
+// Links are the associations a Span has with other Spans.
+func (cfg *SpanConfig) Links() []Link {
+	return cfg.links
+}
+
+// NewRoot identifies a Span as the root Span for a new trace. This is
+// commonly used when an existing trace crosses trust boundaries and the
+// remote parent span context should be ignored for security.
+func (cfg *SpanConfig) NewRoot() bool {
+	return cfg.newRoot
+}
+
+// SpanKind is the role a Span has in a trace.
+func (cfg *SpanConfig) SpanKind() SpanKind {
+	return cfg.spanKind
+}
+
+// NewSpanStartConfig applies all the options to a returned SpanConfig.
+// No validation is performed on the returned SpanConfig (e.g. no uniqueness
+// checking or bounding of data); that is left to the SDK to perform.
+func NewSpanStartConfig(options ...SpanStartOption) SpanConfig {
+	var c SpanConfig
+	for _, option := range options {
+		c = option.applySpanStart(c)
+	}
+	return c
+}
+
+// NewSpanEndConfig applies all the options to a returned SpanConfig.
+// No validation is performed on the returned SpanConfig (e.g. no uniqueness
+// checking or bounding of data); that is left to the SDK to perform.
+func NewSpanEndConfig(options ...SpanEndOption) SpanConfig {
+	var c SpanConfig
+	for _, option := range options {
+		c = option.applySpanEnd(c)
+	}
+	return c
+}
+
+// SpanStartOption applies an option to a SpanConfig. These options are applicable
+// only when the span is created.
+type SpanStartOption interface {
+	applySpanStart(SpanConfig) SpanConfig
+}
+
+type spanOptionFunc func(SpanConfig) SpanConfig
+
+func (fn spanOptionFunc) applySpanStart(cfg SpanConfig) SpanConfig {
+	return fn(cfg)
+}
+
+// SpanEndOption applies an option to a SpanConfig. These options are
+// applicable only when the span is ended.
+type SpanEndOption interface {
+	applySpanEnd(SpanConfig) SpanConfig
+}
+
+// EventConfig is a group of options for an Event.
+type EventConfig struct {
+	attributes []attribute.KeyValue
+	timestamp  time.Time
+	stackTrace bool
+}
+
+// Attributes describe the associated qualities of an Event.
+func (cfg *EventConfig) Attributes() []attribute.KeyValue {
+	return cfg.attributes
+}
+
+// Timestamp is a time in an Event life-cycle.
+func (cfg *EventConfig) Timestamp() time.Time {
+	return cfg.timestamp
+}
+
+// StackTrace checks whether stack trace capturing is enabled.
+func (cfg *EventConfig) StackTrace() bool {
+	return cfg.stackTrace
+}
+
+// NewEventConfig applies all the EventOptions to a returned EventConfig. If no
+// timestamp option is passed, the returned EventConfig will have a Timestamp
+// set to the call time. Beyond that, no validation is performed on the
+// returned EventConfig.
+func NewEventConfig(options ...EventOption) EventConfig {
+	var c EventConfig
+	for _, option := range options {
+		c = option.applyEvent(c)
+	}
+	if c.timestamp.IsZero() {
+		c.timestamp = time.Now()
+	}
+	return c
+}
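+
+// An illustrative sketch (example values only): NewEventConfig falls back to
+// the call time when no WithTimestamp option is supplied.
+//
+//	c := trace.NewEventConfig()                                    // c.Timestamp() is ~time.Now()
+//	c = trace.NewEventConfig(trace.WithTimestamp(time.Unix(0, 0))) // c.Timestamp() is time.Unix(0, 0)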
+
+// EventOption applies span event options to an EventConfig.
+type EventOption interface {
+	applyEvent(EventConfig) EventConfig
+}
+
+// SpanOption are options that can be used at both the beginning and end of a span.
+type SpanOption interface {
+	SpanStartOption
+	SpanEndOption
+}
+
+// SpanStartEventOption are options that can be used at the start of a span, or with an event.
+type SpanStartEventOption interface {
+	SpanStartOption
+	EventOption
+}
+
+// SpanEndEventOption are options that can be used at the end of a span, or with an event.
+type SpanEndEventOption interface {
+	SpanEndOption
+	EventOption
+}
+
+type attributeOption []attribute.KeyValue
+
+func (o attributeOption) applySpan(c SpanConfig) SpanConfig {
+	c.attributes = append(c.attributes, []attribute.KeyValue(o)...)
+	return c
+}
+func (o attributeOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) }
+func (o attributeOption) applyEvent(c EventConfig) EventConfig {
+	c.attributes = append(c.attributes, []attribute.KeyValue(o)...)
+	return c
+}
+
+var _ SpanStartEventOption = attributeOption{}
+
+// WithAttributes adds the attributes related to a span life-cycle event.
+// These attributes are used to describe the work a Span represents when this
+// option is provided to a Span's start or end events. Otherwise, these
+// attributes provide additional information about the event being recorded
+// (e.g. error, state change, processing progress, system event).
+//
+// If multiple of these options are passed the attributes of each successive
+// option will extend the attributes instead of overwriting. There is no
+// guarantee of uniqueness in the resulting attributes.
+func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption {
+	return attributeOption(attributes)
+}
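+
+// An illustrative sketch (attribute keys are examples): successive
+// WithAttributes options accumulate rather than overwrite, and no
+// de-duplication is performed.
+//
+//	cfg := trace.NewSpanStartConfig(
+//		trace.WithAttributes(attribute.String("peer.service", "db")),
+//		trace.WithAttributes(attribute.Int("retry.count", 2)),
+//	)
+//	len(cfg.Attributes()) // 2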
+
+// SpanEventOption are options that can be used with an event or a span.
+type SpanEventOption interface {
+	SpanOption
+	EventOption
+}
+
+type timestampOption time.Time
+
+func (o timestampOption) applySpan(c SpanConfig) SpanConfig {
+	c.timestamp = time.Time(o)
+	return c
+}
+func (o timestampOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) }
+func (o timestampOption) applySpanEnd(c SpanConfig) SpanConfig   { return o.applySpan(c) }
+func (o timestampOption) applyEvent(c EventConfig) EventConfig {
+	c.timestamp = time.Time(o)
+	return c
+}
+
+var _ SpanEventOption = timestampOption{}
+
+// WithTimestamp sets the time of a Span or Event life-cycle moment (e.g.
+// started, stopped, errored).
+func WithTimestamp(t time.Time) SpanEventOption {
+	return timestampOption(t)
+}
+
+type stackTraceOption bool
+
+func (o stackTraceOption) applyEvent(c EventConfig) EventConfig {
+	c.stackTrace = bool(o)
+	return c
+}
+
+func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig {
+	c.stackTrace = bool(o)
+	return c
+}
+func (o stackTraceOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) }
+
+// WithStackTrace sets the flag to capture the error with stack trace (e.g. true, false).
+func WithStackTrace(b bool) SpanEndEventOption {
+	return stackTraceOption(b)
+}
+
+// WithLinks adds links to a Span. The links are added to the existing Span
+// links, i.e. this does not overwrite. Links with invalid span context are ignored.
+func WithLinks(links ...Link) SpanStartOption {
+	return spanOptionFunc(func(cfg SpanConfig) SpanConfig {
+		cfg.links = append(cfg.links, links...)
+		return cfg
+	})
+}
+
+// WithNewRoot specifies that the Span should be treated as a root Span. Any
+// existing parent span context will be ignored when defining the Span's trace
+// identifiers.
+func WithNewRoot() SpanStartOption {
+	return spanOptionFunc(func(cfg SpanConfig) SpanConfig {
+		cfg.newRoot = true
+		return cfg
+	})
+}
+
+// WithSpanKind sets the SpanKind of a Span.
+func WithSpanKind(kind SpanKind) SpanStartOption {
+	return spanOptionFunc(func(cfg SpanConfig) SpanConfig {
+		cfg.spanKind = kind
+		return cfg
+	})
+}
+
+// WithInstrumentationVersion sets the instrumentation version.
+func WithInstrumentationVersion(version string) TracerOption {
+	return tracerOptionFunc(func(cfg TracerConfig) TracerConfig {
+		cfg.instrumentationVersion = version
+		return cfg
+	})
+}
+
+// WithInstrumentationAttributes sets the instrumentation attributes.
+//
+// The passed attributes will be de-duplicated.
+func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption {
+	return tracerOptionFunc(func(config TracerConfig) TracerConfig {
+		config.attrs = attribute.NewSet(attr...)
+		return config
+	})
+}
+
+// WithSchemaURL sets the schema URL for the Tracer.
+func WithSchemaURL(schemaURL string) TracerOption {
+	return tracerOptionFunc(func(cfg TracerConfig) TracerConfig {
+		cfg.schemaURL = schemaURL
+		return cfg
+	})
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go
new file mode 100644
index 0000000000000000000000000000000000000000..76f9a083c409637a89c17f3f70d6be84bd34f58d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/context.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import "context"
+
+type traceContextKeyType int
+
+const currentSpanKey traceContextKeyType = iota
+
+// ContextWithSpan returns a copy of parent with span set as the current Span.
+func ContextWithSpan(parent context.Context, span Span) context.Context {
+	return context.WithValue(parent, currentSpanKey, span)
+}
+
+// ContextWithSpanContext returns a copy of parent with sc as the current
+// Span. The Span implementation that wraps sc is non-recording and performs
+// no operations other than to return sc as the SpanContext from the
+// SpanContext method.
+func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context {
+	return ContextWithSpan(parent, nonRecordingSpan{sc: sc})
+}
+
+// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly
+// as a remote SpanContext and as the current Span. The Span implementation
+// that wraps rsc is non-recording and performs no operations other than to
+// return rsc as the SpanContext from the SpanContext method.
+func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context {
+	return ContextWithSpanContext(parent, rsc.WithRemote(true))
+}
+
+// SpanFromContext returns the current Span from ctx.
+//
+// If no Span is currently set in ctx an implementation of a Span that
+// performs no operations is returned.
+func SpanFromContext(ctx context.Context) Span {
+	if ctx == nil {
+		return noopSpan{}
+	}
+	if span, ok := ctx.Value(currentSpanKey).(Span); ok {
+		return span
+	}
+	return noopSpan{}
+}
+
+// SpanContextFromContext returns the current Span's SpanContext.
+func SpanContextFromContext(ctx context.Context) SpanContext {
+	return SpanFromContext(ctx).SpanContext()
+}
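+
+// An illustrative sketch of the round trip through a context (sc stands for
+// any valid SpanContext):
+//
+//	ctx := trace.ContextWithSpanContext(context.Background(), sc)
+//	got := trace.SpanContextFromContext(ctx)            // got.Equal(sc) == true
+//	span := trace.SpanFromContext(context.Background()) // a no-op Span, never nil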
diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..440f3d7565a122bfb64fbb94fa92450280af2007
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/doc.go
@@ -0,0 +1,130 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package trace provides an implementation of the tracing part of the
+OpenTelemetry API.
+
+To participate in distributed traces a Span needs to be created for the
+operation being performed as part of a traced workflow. In its simplest form:
+
+	var tracer trace.Tracer
+
+	func init() {
+		tracer = otel.Tracer("instrumentation/package/name")
+	}
+
+	func operation(ctx context.Context) {
+		var span trace.Span
+		ctx, span = tracer.Start(ctx, "operation")
+		defer span.End()
+		// ...
+	}
+
+A Tracer is unique to the instrumentation and is used to create Spans.
+Instrumentation should be designed to accept a TracerProvider from which it
+can create its own unique Tracer. Alternatively, the registered global
+TracerProvider from the go.opentelemetry.io/otel package can be used as
+a default.
+
+	const (
+		name    = "instrumentation/package/name"
+		version = "0.1.0"
+	)
+
+	type Instrumentation struct {
+		tracer trace.Tracer
+	}
+
+	func NewInstrumentation(tp trace.TracerProvider) *Instrumentation {
+		if tp == nil {
+			tp = otel.TracerProvider()
+		}
+		return &Instrumentation{
+			tracer: tp.Tracer(name, trace.WithInstrumentationVersion(version)),
+		}
+	}
+
+	func operation(ctx context.Context, inst *Instrumentation) {
+		var span trace.Span
+		ctx, span = inst.tracer.Start(ctx, "operation")
+		defer span.End()
+		// ...
+	}
+
+# API Implementations
+
+This package does not conform to the standard Go versioning policy; all of its
+interfaces may have methods added to them without a package major version bump.
+This non-standard API evolution could surprise an uninformed implementation
+author. They could unknowingly build their implementation in a way that would
+result in a runtime panic for their users that update to the new API.
+
+The API is designed to help inform an instrumentation author about this
+non-standard API evolution. It requires them to choose a default behavior for
+unimplemented interface methods. There are three behavior choices they can
+make:
+
+  - Compilation failure
+  - Panic
+  - Default to another implementation
+
+All interfaces in this API embed a corresponding interface from
+[go.opentelemetry.io/otel/trace/embedded]. If an author wants the default
+behavior of their implementations to be a compilation failure, signaling to
+their users they need to update to the latest version of that implementation,
+they need to embed the corresponding interface from
+[go.opentelemetry.io/otel/trace/embedded] in their implementation. For
+example,
+
+	import "go.opentelemetry.io/otel/trace/embedded"
+
+	type TracerProvider struct {
+		embedded.TracerProvider
+		// ...
+	}
+
+If an author wants the default behavior of their implementations to panic, they
+can embed the API interface directly.
+
+	import "go.opentelemetry.io/otel/trace"
+
+	type TracerProvider struct {
+		trace.TracerProvider
+		// ...
+	}
+
+This option is not recommended. It will lead to publishing packages that
+contain runtime panics when users update to newer versions of
+[go.opentelemetry.io/otel/trace], which may be done with a transitive
+dependency.
+
+Finally, an author can embed another implementation in theirs. The embedded
+implementation will be used for methods not defined by the author. For example,
+an author who wants to default to silently dropping the call can use
+[go.opentelemetry.io/otel/trace/noop]:
+
+	import "go.opentelemetry.io/otel/trace/noop"
+
+	type TracerProvider struct {
+		noop.TracerProvider
+		// ...
+	}
+
+It is strongly recommended that authors only embed
+[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior.
+That implementation is the only one OpenTelemetry authors can guarantee will
+fully implement all the API interfaces when a user updates their API.
+*/
+package trace // import "go.opentelemetry.io/otel/trace"
diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
new file mode 100644
index 0000000000000000000000000000000000000000..898db5a7546ef1ee93db08ff3afd87c814a4a209
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
@@ -0,0 +1,56 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package embedded provides interfaces embedded within the [OpenTelemetry
+// trace API].
+//
+// Implementers of the [OpenTelemetry trace API] can embed the relevant type
+// from this package into their implementation directly. Doing so will result
+// in a compilation error for users when the [OpenTelemetry trace API] is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+//
+// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace
+package embedded // import "go.opentelemetry.io/otel/trace/embedded"
+
+// TracerProvider is embedded in
+// [go.opentelemetry.io/otel/trace.TracerProvider].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to
+// experience a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type TracerProvider interface{ tracerProvider() }
+
+// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a
+// compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface
+// is extended (which is something that can happen without a major version bump
+// of the API package).
+type Tracer interface{ tracer() }
+
+// Span is embedded in [go.opentelemetry.io/otel/trace.Span].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a
+// compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Span interface{ span() }
diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go
new file mode 100644
index 0000000000000000000000000000000000000000..88fcb81611f96cc2a33c37db64727f6f97fd6d3d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go
@@ -0,0 +1,27 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+// nonRecordingSpan is a minimal implementation of a Span that wraps a
+// SpanContext. It performs no operations other than to return the wrapped
+// SpanContext.
+type nonRecordingSpan struct {
+	noopSpan
+
+	sc SpanContext
+}
+
+// SpanContext returns the wrapped SpanContext.
+func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc }
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go
new file mode 100644
index 0000000000000000000000000000000000000000..c125491caebfc1776f35a3255c5170d5374275e4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/noop.go
@@ -0,0 +1,93 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+// NewNoopTracerProvider returns an implementation of TracerProvider that
+// performs no operations. The Tracer and Spans created from the returned
+// TracerProvider also perform no operations.
+//
+// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider]
+// instead.
+func NewNoopTracerProvider() TracerProvider {
+	return noopTracerProvider{}
+}
+
+type noopTracerProvider struct{ embedded.TracerProvider }
+
+var _ TracerProvider = noopTracerProvider{}
+
+// Tracer returns a noop implementation of Tracer.
+func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer {
+	return noopTracer{}
+}
+
+// noopTracer is an implementation of Tracer that performs no operations.
+type noopTracer struct{ embedded.Tracer }
+
+var _ Tracer = noopTracer{}
+
+// Start carries forward a non-recording Span, if one is present in the
+// context; otherwise it creates a no-op Span.
+func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) {
+	span := SpanFromContext(ctx)
+	if _, ok := span.(nonRecordingSpan); !ok {
+		// span is likely already a noopSpan, but let's be sure
+		span = noopSpan{}
+	}
+	return ContextWithSpan(ctx, span), span
+}
+
+// noopSpan is an implementation of Span that performs no operations.
+type noopSpan struct{ embedded.Span }
+
+var _ Span = noopSpan{}
+
+// SpanContext returns an empty span context.
+func (noopSpan) SpanContext() SpanContext { return SpanContext{} }
+
+// IsRecording always returns false.
+func (noopSpan) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (noopSpan) SetStatus(codes.Code, string) {}
+
+// SetError does nothing.
+func (noopSpan) SetError(bool) {}
+
+// SetAttributes does nothing.
+func (noopSpan) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (noopSpan) End(...SpanEndOption) {}
+
+// RecordError does nothing.
+func (noopSpan) RecordError(error, ...EventOption) {}
+
+// AddEvent does nothing.
+func (noopSpan) AddEvent(string, ...EventOption) {}
+
+// SetName does nothing.
+func (noopSpan) SetName(string) {}
+
+// TracerProvider returns a no-op TracerProvider.
+func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} }
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
new file mode 100644
index 0000000000000000000000000000000000000000..7f485543c47dbda474ed08413512a703ef780057
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
@@ -0,0 +1,118 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package noop provides an implementation of the OpenTelemetry trace API that
+// produces no telemetry and minimizes used computation resources.
+//
+// Using this package to implement the OpenTelemetry trace API will effectively
+// disable OpenTelemetry.
+//
+// This implementation can be embedded in other implementations of the
+// OpenTelemetry trace API. Doing so will mean the implementation defaults to
+// no operation for methods it does not implement.
+package noop // import "go.opentelemetry.io/otel/trace/noop"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+var (
+	// Compile-time check this implements the OpenTelemetry API.
+
+	_ trace.TracerProvider = TracerProvider{}
+	_ trace.Tracer         = Tracer{}
+	_ trace.Span           = Span{}
+)
+
+// TracerProvider is an OpenTelemetry No-Op TracerProvider.
+type TracerProvider struct{ embedded.TracerProvider }
+
+// NewTracerProvider returns a TracerProvider that does not record any telemetry.
+func NewTracerProvider() TracerProvider {
+	return TracerProvider{}
+}
+
+// Tracer returns an OpenTelemetry Tracer that does not record any telemetry.
+func (TracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer {
+	return Tracer{}
+}
+
+// Tracer is an OpenTelemetry No-Op Tracer.
+type Tracer struct{ embedded.Tracer }
+
+// Start creates a span. The created span will be set in a child context of ctx
+// and returned with the span.
+//
+// If ctx contains a span context, the returned span will also contain that
+// span context. If the span context in ctx is for a non-recording span, that
+// span instance will be returned directly.
+func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) {
+	span := trace.SpanFromContext(ctx)
+
+	// If the parent context contains a non-zero span context, that span
+	// context needs to be returned as a non-recording span
+	// (https://github.com/open-telemetry/opentelemetry-specification/blob/3a1dde966a4ce87cce5adf464359fe369741bbea/specification/trace/api.md#behavior-of-the-api-in-the-absence-of-an-installed-sdk).
+	var zeroSC trace.SpanContext
+	if sc := span.SpanContext(); !sc.Equal(zeroSC) {
+		if !span.IsRecording() {
+			// If the span is not recording return it directly.
+			return ctx, span
+		}
+		// Otherwise, wrap the span context in a non-recording span and return it.
+		span = Span{sc: sc}
+	} else {
+		// No parent, return a No-Op span with an empty span context.
+		span = Span{}
+	}
+	return trace.ContextWithSpan(ctx, span), span
+}
+
+// Span is an OpenTelemetry No-Op Span.
+type Span struct {
+	embedded.Span
+
+	sc trace.SpanContext
+}
+
+// SpanContext returns the wrapped SpanContext, which is empty unless s was
+// created to carry a parent's non-recording span context.
+func (s Span) SpanContext() trace.SpanContext { return s.sc }
+
+// IsRecording always returns false.
+func (Span) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (Span) SetStatus(codes.Code, string) {}
+
+// SetAttributes does nothing.
+func (Span) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (Span) End(...trace.SpanEndOption) {}
+
+// RecordError does nothing.
+func (Span) RecordError(error, ...trace.EventOption) {}
+
+// AddEvent does nothing.
+func (Span) AddEvent(string, ...trace.EventOption) {}
+
+// SetName does nothing.
+func (Span) SetName(string) {}
+
+// TracerProvider returns a No-Op TracerProvider.
+func (Span) TracerProvider() trace.TracerProvider { return TracerProvider{} }
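+
+// An illustrative sketch (function and instrumentation names are examples):
+// using the noop provider as a default in instrumentation code.
+//
+//	func newTracer(tp trace.TracerProvider) trace.Tracer {
+//		if tp == nil {
+//			tp = noop.NewTracerProvider()
+//		}
+//		return tp.Tracer("example/instrumentation")
+//	}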
diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go
new file mode 100644
index 0000000000000000000000000000000000000000..26a4b2260ec666b69f6313fee700116a8d0fae2c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/trace.go
@@ -0,0 +1,577 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+	"bytes"
+	"context"
+	"encoding/hex"
+	"encoding/json"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+const (
+	// FlagsSampled is a bitmask with the sampled bit set. A SpanContext
+	// with the sampling bit set means the span is sampled.
+	FlagsSampled = TraceFlags(0x01)
+
+	errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase"
+
+	errInvalidTraceIDLength errorConst = "hex encoded trace-id must have a length of 32"
+	errNilTraceID           errorConst = "trace-id can't be all zero"
+
+	errInvalidSpanIDLength errorConst = "hex encoded span-id must have a length of 16"
+	errNilSpanID           errorConst = "span-id can't be all zero"
+)
+
+type errorConst string
+
+func (e errorConst) Error() string {
+	return string(e)
+}
+
+// TraceID is a unique identity of a trace.
+// nolint:revive // revive complains about stutter of `trace.TraceID`.
+type TraceID [16]byte
+
+var (
+	nilTraceID TraceID
+	_          json.Marshaler = nilTraceID
+)
+
+// IsValid checks whether the trace TraceID is valid. A valid trace ID does
+// not consist of zeros only.
+func (t TraceID) IsValid() bool {
+	return !bytes.Equal(t[:], nilTraceID[:])
+}
+
+// MarshalJSON implements a custom marshal function to encode TraceID
+// as a hex string.
+func (t TraceID) MarshalJSON() ([]byte, error) {
+	return json.Marshal(t.String())
+}
+
+// String returns the hex string representation form of a TraceID.
+func (t TraceID) String() string {
+	return hex.EncodeToString(t[:])
+}
+
+// SpanID is a unique identity of a span in a trace.
+type SpanID [8]byte
+
+var (
+	nilSpanID SpanID
+	_         json.Marshaler = nilSpanID
+)
+
+// IsValid checks whether the SpanID is valid. A valid SpanID does not consist
+// of zeros only.
+func (s SpanID) IsValid() bool {
+	return !bytes.Equal(s[:], nilSpanID[:])
+}
+
+// MarshalJSON implements a custom marshal function to encode SpanID
+// as a hex string.
+func (s SpanID) MarshalJSON() ([]byte, error) {
+	return json.Marshal(s.String())
+}
+
+// String returns the hex string representation form of a SpanID.
+func (s SpanID) String() string {
+	return hex.EncodeToString(s[:])
+}
+
+// TraceIDFromHex returns a TraceID from a hex string if it is compliant with
+// the W3C trace-context specification. See more at
+// https://www.w3.org/TR/trace-context/#trace-id
+// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`.
+func TraceIDFromHex(h string) (TraceID, error) {
+	t := TraceID{}
+	if len(h) != 32 {
+		return t, errInvalidTraceIDLength
+	}
+
+	if err := decodeHex(h, t[:]); err != nil {
+		return t, err
+	}
+
+	if !t.IsValid() {
+		return t, errNilTraceID
+	}
+	return t, nil
+}
+
+// SpanIDFromHex returns a SpanID from a hex string if it is compliant
+// with the w3c trace-context specification.
+// See more at https://www.w3.org/TR/trace-context/#parent-id
+func SpanIDFromHex(h string) (SpanID, error) {
+	s := SpanID{}
+	if len(h) != 16 {
+		return s, errInvalidSpanIDLength
+	}
+
+	if err := decodeHex(h, s[:]); err != nil {
+		return s, err
+	}
+
+	if !s.IsValid() {
+		return s, errNilSpanID
+	}
+	return s, nil
+}
+
+func decodeHex(h string, b []byte) error {
+	for _, r := range h {
+		switch {
+		case 'a' <= r && r <= 'f':
+			continue
+		case '0' <= r && r <= '9':
+			continue
+		default:
+			return errInvalidHexID
+		}
+	}
+
+	decoded, err := hex.DecodeString(h)
+	if err != nil {
+		return err
+	}
+
+	copy(b, decoded)
+	return nil
+}
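+
+// An illustrative sketch (inputs are the W3C documentation example IDs): both
+// parsers reject wrong lengths, uppercase hex, and all-zero IDs.
+//
+//	tid, err := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736") // valid
+//	_, err = trace.TraceIDFromHex("00000000000000000000000000000000")    // error: all zero
+//	_, err = trace.SpanIDFromHex("00F067AA0BA902B7")                     // error: uppercase hex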
+
+// TraceFlags contains flags that can be set on a SpanContext.
+type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`.
+
+// IsSampled returns if the sampling bit is set in the TraceFlags.
+func (tf TraceFlags) IsSampled() bool {
+	return tf&FlagsSampled == FlagsSampled
+}
+
+// WithSampled sets the sampling bit in a new copy of the TraceFlags.
+func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive  // sampled is not a control flag.
+	if sampled {
+		return tf | FlagsSampled
+	}
+
+	return tf &^ FlagsSampled
+}
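+
+// An illustrative sketch: TraceFlags is a value type, so WithSampled returns
+// a new value and leaves the receiver unchanged.
+//
+//	var tf trace.TraceFlags
+//	sampled := tf.WithSampled(true)
+//	tf.IsSampled()      // false
+//	sampled.IsSampled() // true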
+
+// MarshalJSON implements a custom marshal function to encode TraceFlags
+// as a hex string.
+func (tf TraceFlags) MarshalJSON() ([]byte, error) {
+	return json.Marshal(tf.String())
+}
+
+// String returns the hex string representation form of TraceFlags.
+func (tf TraceFlags) String() string {
+	return hex.EncodeToString([]byte{byte(tf)}[:])
+}
+
+// SpanContextConfig contains mutable fields usable for constructing
+// an immutable SpanContext.
+type SpanContextConfig struct {
+	TraceID    TraceID
+	SpanID     SpanID
+	TraceFlags TraceFlags
+	TraceState TraceState
+	Remote     bool
+}
+
+// NewSpanContext constructs a SpanContext using values from the provided
+// SpanContextConfig.
+func NewSpanContext(config SpanContextConfig) SpanContext {
+	return SpanContext{
+		traceID:    config.TraceID,
+		spanID:     config.SpanID,
+		traceFlags: config.TraceFlags,
+		traceState: config.TraceState,
+		remote:     config.Remote,
+	}
+}
+
+// SpanContext contains identifying trace information about a Span.
+type SpanContext struct {
+	traceID    TraceID
+	spanID     SpanID
+	traceFlags TraceFlags
+	traceState TraceState
+	remote     bool
+}
+
+var _ json.Marshaler = SpanContext{}
+
+// IsValid returns if the SpanContext is valid. A valid span context has a
+// valid TraceID and SpanID.
+func (sc SpanContext) IsValid() bool {
+	return sc.HasTraceID() && sc.HasSpanID()
+}
+
+// IsRemote indicates whether the SpanContext represents a remotely-created Span.
+func (sc SpanContext) IsRemote() bool {
+	return sc.remote
+}
+
+// WithRemote returns a copy of sc with the Remote property set to remote.
+func (sc SpanContext) WithRemote(remote bool) SpanContext {
+	return SpanContext{
+		traceID:    sc.traceID,
+		spanID:     sc.spanID,
+		traceFlags: sc.traceFlags,
+		traceState: sc.traceState,
+		remote:     remote,
+	}
+}
+
+// TraceID returns the TraceID from the SpanContext.
+func (sc SpanContext) TraceID() TraceID {
+	return sc.traceID
+}
+
+// HasTraceID checks if the SpanContext has a valid TraceID.
+func (sc SpanContext) HasTraceID() bool {
+	return sc.traceID.IsValid()
+}
+
+// WithTraceID returns a new SpanContext with the TraceID replaced.
+func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext {
+	return SpanContext{
+		traceID:    traceID,
+		spanID:     sc.spanID,
+		traceFlags: sc.traceFlags,
+		traceState: sc.traceState,
+		remote:     sc.remote,
+	}
+}
+
+// SpanID returns the SpanID from the SpanContext.
+func (sc SpanContext) SpanID() SpanID {
+	return sc.spanID
+}
+
+// HasSpanID checks if the SpanContext has a valid SpanID.
+func (sc SpanContext) HasSpanID() bool {
+	return sc.spanID.IsValid()
+}
+
+// WithSpanID returns a new SpanContext with the SpanID replaced.
+func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext {
+	return SpanContext{
+		traceID:    sc.traceID,
+		spanID:     spanID,
+		traceFlags: sc.traceFlags,
+		traceState: sc.traceState,
+		remote:     sc.remote,
+	}
+}
+
+// TraceFlags returns the flags from the SpanContext.
+func (sc SpanContext) TraceFlags() TraceFlags {
+	return sc.traceFlags
+}
+
+// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags.
+func (sc SpanContext) IsSampled() bool {
+	return sc.traceFlags.IsSampled()
+}
+
+// WithTraceFlags returns a new SpanContext with the TraceFlags replaced.
+func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext {
+	return SpanContext{
+		traceID:    sc.traceID,
+		spanID:     sc.spanID,
+		traceFlags: flags,
+		traceState: sc.traceState,
+		remote:     sc.remote,
+	}
+}
+
+// TraceState returns the TraceState from the SpanContext.
+func (sc SpanContext) TraceState() TraceState {
+	return sc.traceState
+}
+
+// WithTraceState returns a new SpanContext with the TraceState replaced.
+func (sc SpanContext) WithTraceState(state TraceState) SpanContext {
+	return SpanContext{
+		traceID:    sc.traceID,
+		spanID:     sc.spanID,
+		traceFlags: sc.traceFlags,
+		traceState: state,
+		remote:     sc.remote,
+	}
+}
+
+// Equal is a predicate that determines whether two SpanContext values are equal.
+func (sc SpanContext) Equal(other SpanContext) bool {
+	return sc.traceID == other.traceID &&
+		sc.spanID == other.spanID &&
+		sc.traceFlags == other.traceFlags &&
+		sc.traceState.String() == other.traceState.String() &&
+		sc.remote == other.remote
+}
+
+// MarshalJSON implements a custom marshal function to encode a SpanContext.
+func (sc SpanContext) MarshalJSON() ([]byte, error) {
+	return json.Marshal(SpanContextConfig{
+		TraceID:    sc.traceID,
+		SpanID:     sc.spanID,
+		TraceFlags: sc.traceFlags,
+		TraceState: sc.traceState,
+		Remote:     sc.remote,
+	})
+}
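+
+// An illustrative sketch (IDs are the W3C documentation examples):
+// constructing a SpanContext and deriving modified copies.
+//
+//	tid, _ := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
+//	sid, _ := trace.SpanIDFromHex("00f067aa0ba902b7")
+//	sc := trace.NewSpanContext(trace.SpanContextConfig{
+//		TraceID:    tid,
+//		SpanID:     sid,
+//		TraceFlags: trace.FlagsSampled,
+//	})
+//	sc.IsValid()                   // true
+//	sc.WithRemote(true).IsRemote() // true; sc itself is unchanged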
+
+// Span is the individual component of a trace. It represents a single named
+// and timed operation of a workflow that is traced. A Tracer is used to
+// create a Span and it is then up to the operation the Span represents to
+// properly end the Span when the operation itself ends.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Span interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Span
+
+	// End completes the Span. The Span is considered complete and ready to be
+	// delivered through the rest of the telemetry pipeline after this method
+	// is called. Therefore, updates to the Span are not allowed after this
+	// method has been called.
+	End(options ...SpanEndOption)
+
+	// AddEvent adds an event with the provided name and options.
+	AddEvent(name string, options ...EventOption)
+
+	// IsRecording returns the recording state of the Span. It will return
+	// true if the Span is active and events can be recorded.
+	IsRecording() bool
+
+	// RecordError will record err as an exception span event for this span. An
+	// additional call to SetStatus is required if the Status of the Span should
+	// be set to Error, as this method does not change the Span status. If this
+	// span is not being recorded or err is nil then this method does nothing.
+	RecordError(err error, options ...EventOption)
+
+	// SpanContext returns the SpanContext of the Span. The returned SpanContext
+	// is usable even after the End method has been called for the Span.
+	SpanContext() SpanContext
+
+	// SetStatus sets the status of the Span in the form of a code and a
+	// description, provided the status hasn't already been set to a higher
+	// value before (OK > Error > Unset). The description is only included in a
+	// status when the code is for an error.
+	SetStatus(code codes.Code, description string)
+
+	// SetName sets the Span name.
+	SetName(name string)
+
+	// SetAttributes sets kv as attributes of the Span. If a key from kv
+	// already exists for an attribute of the Span it will be overwritten with
+	// the value contained in kv.
+	SetAttributes(kv ...attribute.KeyValue)
+
+	// TracerProvider returns a TracerProvider that can be used to generate
+	// additional Spans on the same telemetry pipeline as the current Span.
+	TracerProvider() TracerProvider
+}
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+//
+// For example, a Link is used in the following situations:
+//
+//  1. Batch Processing: A batch of operations may contain operations
+//     associated with one or more traces/spans. Since there can only be one
+//     parent SpanContext, a Link is used to keep reference to the
+//     SpanContext of all operations in the batch.
+//  2. Public Endpoint: A SpanContext for an incoming client request on a
+//     public endpoint should be considered untrusted. In such a case, a new
+//     trace with its own identity and sampling decision needs to be created,
+//     but this new trace needs to be related to the original trace in some
+//     form. A Link is used to keep reference to the original SpanContext and
+//     track the relationship.
+type Link struct {
+	// SpanContext of the linked Span.
+	SpanContext SpanContext
+
+	// Attributes describe the aspects of the link.
+	Attributes []attribute.KeyValue
+}
+
+// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx.
+func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
+	return Link{
+		SpanContext: SpanContextFromContext(ctx),
+		Attributes:  attrs,
+	}
+}
+
+// SpanKind is the role a Span plays in a Trace.
+type SpanKind int
+
+// As a convenience, these match the proto definition, see
+// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
+//
+// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
+// to coerce a span kind to a valid value.
+const (
+	// SpanKindUnspecified is an unspecified SpanKind and is not a valid
+	// SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
+	// if it is received.
+	SpanKindUnspecified SpanKind = 0
+	// SpanKindInternal is a SpanKind for a Span that represents an internal
+	// operation within an application.
+	SpanKindInternal SpanKind = 1
+	// SpanKindServer is a SpanKind for a Span that represents the operation
+	// of handling a request from a client.
+	SpanKindServer SpanKind = 2
+	// SpanKindClient is a SpanKind for a Span that represents the operation
+	// of a client making a request to a server.
+	SpanKindClient SpanKind = 3
+	// SpanKindProducer is a SpanKind for a Span that represents the operation
+	// of a producer sending a message to a message broker. Unlike
+	// SpanKindClient and SpanKindServer, there is often no direct
+	// relationship between this kind of Span and a SpanKindConsumer kind. A
+	// SpanKindProducer Span will end once the message is accepted by the
+	// message broker which might not overlap with the processing of that
+	// message.
+	SpanKindProducer SpanKind = 4
+	// SpanKindConsumer is a SpanKind for a Span that represents the operation
+	// of a consumer receiving a message from a message broker. Like
+	// SpanKindProducer Spans, there is often no direct relationship between
+	// this Span and the Span that produced the message.
+	SpanKindConsumer SpanKind = 5
+)
+
+// ValidateSpanKind returns a valid span kind value.  This will coerce
+// invalid values into the default value, SpanKindInternal.
+func ValidateSpanKind(spanKind SpanKind) SpanKind {
+	switch spanKind {
+	case SpanKindInternal,
+		SpanKindServer,
+		SpanKindClient,
+		SpanKindProducer,
+		SpanKindConsumer:
+		// valid
+		return spanKind
+	default:
+		return SpanKindInternal
+	}
+}
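+
+// An illustrative sketch: values outside the defined kinds are coerced to
+// SpanKindInternal.
+//
+//	trace.ValidateSpanKind(trace.SpanKind(42))   // SpanKindInternal
+//	trace.ValidateSpanKind(trace.SpanKindServer) // SpanKindServer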
+
+// String returns the specified name of the SpanKind in lower-case.
+func (sk SpanKind) String() string {
+	switch sk {
+	case SpanKindInternal:
+		return "internal"
+	case SpanKindServer:
+		return "server"
+	case SpanKindClient:
+		return "client"
+	case SpanKindProducer:
+		return "producer"
+	case SpanKindConsumer:
+		return "consumer"
+	default:
+		return "unspecified"
+	}
+}
+
+// Tracer is the creator of Spans.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Tracer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Tracer
+
+	// Start creates a span and a context.Context containing the newly-created span.
+	//
+	// If the context.Context provided in `ctx` contains a Span then the newly-created
+	// Span will be a child of that span, otherwise it will be a root span. This behavior
+	// can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
+	// newly-created Span to be a root span even if `ctx` contains a Span.
+	//
+	// When creating a Span it is recommended to provide all known span attributes using
+	// the `WithAttributes()` SpanOption as samplers will only have access to the
+	// attributes provided when a Span is created.
+	//
+	// Any Span that is created MUST also be ended. This is the responsibility of the user.
+	// Implementations of this API may leak memory or other resources if Spans are not ended.
+	Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
+}
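+
+// An illustrative sketch (span and attribute names are examples): providing
+// sampler-visible attributes at span creation.
+//
+//	ctx, span := tracer.Start(ctx, "HTTP GET /users",
+//		trace.WithSpanKind(trace.SpanKindServer),
+//		trace.WithAttributes(attribute.String("http.method", "GET")),
+//	)
+//	defer span.End()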
+
+// TracerProvider provides Tracers that are used by instrumentation code to
+// trace computational workflows.
+//
+// A TracerProvider is the collection destination of all Spans from Tracers it
+// provides; it represents a unique telemetry collection pipeline. How that
+// pipeline is defined, meaning how those Spans are collected, processed, and
+// where they are exported, depends on its implementation. Instrumentation
+// authors do not need to define this implementation, rather just use the
+// provided Tracers to instrument code.
+//
+// Commonly, instrumentation code will accept a TracerProvider implementation
+// at runtime from its users or it can simply use the globally registered one
+// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type TracerProvider interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.TracerProvider
+
+	// Tracer returns a unique Tracer scoped to be used by instrumentation code
+	// to trace computational workflows. The scope and identity of that
+	// instrumentation code is uniquely defined by the name and options passed.
+	//
+	// The passed name needs to uniquely identify instrumentation code.
+	// Therefore, it is recommended that name is the Go package name of the
+	// library providing instrumentation (note: not the code being
+	// instrumented). Instrumentation libraries can have multiple versions,
+	// therefore, the WithInstrumentationVersion option should be used to
+	// distinguish these different codebases. Additionally, instrumentation
+	// libraries may sometimes use traces to communicate different domains of
+	// workflow data (i.e. using spans to communicate workflow events only). If
+	// this is the case, the WithInstrumentationAttributes option should be used to
+	// uniquely identify Tracers that handle the different domains of workflow
+	// data.
+	//
+	// If the same name and options are passed multiple times, the same Tracer
+	// will be returned (it is up to the implementation if this will be the
+	// same underlying instance of that Tracer or not). It is not necessary to
+	// call this multiple times with the same name and options to get an
+	// up-to-date Tracer. All implementations will ensure any TracerProvider
+	// configuration changes are propagated to all provided Tracers.
+	//
+	// If name is empty, then an implementation defined default name will be
+	// used instead.
+	//
+	// This method is safe to call concurrently.
+	Tracer(name string, options ...TracerOption) Tracer
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
new file mode 100644
index 0000000000000000000000000000000000000000..db936ba5b73e80346662f6f13427c0b5f8f13650
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
@@ -0,0 +1,331 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+const (
+	maxListMembers = 32
+
+	listDelimiters  = ","
+	memberDelimiter = "="
+
+	errInvalidKey    errorConst = "invalid tracestate key"
+	errInvalidValue  errorConst = "invalid tracestate value"
+	errInvalidMember errorConst = "invalid tracestate list-member"
+	errMemberNumber  errorConst = "too many list-members in tracestate"
+	errDuplicate     errorConst = "duplicate list-member in tracestate"
+)
+
+type member struct {
+	Key   string
+	Value string
+}
+
+// according to (chr = %x20 / nblk-chr) with (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E),
+// which collapses to (chr = %x20-2B / %x2D-3C / %x3E-7E).
+func checkValueChar(v byte) bool {
+	return v >= '\x20' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
+}
+
+// according to (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E) .
+func checkValueLast(v byte) bool {
+	return v >= '\x21' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
+}
+
+// based on the W3C Trace Context specification
+//
+//	value    = (0*255(chr)) nblk-chr
+//	nblk-chr = %x21-2B / %x2D-3C / %x3E-7E
+//	chr      = %x20 / nblk-chr
+//
+// see https://www.w3.org/TR/trace-context-1/#value
+func checkValue(val string) bool {
+	n := len(val)
+	if n == 0 || n > 256 {
+		return false
+	}
+	for i := 0; i < n-1; i++ {
+		if !checkValueChar(val[i]) {
+			return false
+		}
+	}
+	return checkValueLast(val[n-1])
+}
+
+func checkKeyRemain(key string) bool {
+	// ( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
+	for _, v := range key {
+		if isAlphaNum(byte(v)) {
+			continue
+		}
+		switch v {
+		case '_', '-', '*', '/':
+			continue
+		}
+		return false
+	}
+	return true
+}
+
+// according to
+//
+//	simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//	system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//
+// n is the maximum length of the remaining part: 255 for a simple-key, 13 for a system-id.
+func checkKeyPart(key string, n int) bool {
+	if len(key) == 0 {
+		return false
+	}
+	first := key[0] // key's first char
+	ret := len(key[1:]) <= n
+	ret = ret && first >= 'a' && first <= 'z'
+	return ret && checkKeyRemain(key[1:])
+}
+
+func isAlphaNum(c byte) bool {
+	if c >= 'a' && c <= 'z' {
+		return true
+	}
+	return c >= '0' && c <= '9'
+}
+
+// according to
+//
+//	tenant-id = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
+//
+// n is the maximum length of the remaining part; for a tenant-id it is 240.
+func checkKeyTenant(key string, n int) bool {
+	if len(key) == 0 {
+		return false
+	}
+	return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:])
+}
+
+// based on the W3C Trace Context specification
+//
+//	key = simple-key / multi-tenant-key
+//	simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//	multi-tenant-key = tenant-id "@" system-id
+//	tenant-id = ( lcalpha / DIGIT ) (0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//	system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//	lcalpha    = %x61-7A ; a-z
+//
+// see https://www.w3.org/TR/trace-context-1/#tracestate-header.
+func checkKey(key string) bool {
+	tenant, system, ok := strings.Cut(key, "@")
+	if !ok {
+		return checkKeyPart(key, 255)
+	}
+	return checkKeyTenant(tenant, 240) && checkKeyPart(system, 13)
+}
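+
+// An illustrative sketch (keys are example data): both key forms accepted by
+// checkKey.
+//
+//	checkKey("congo")      // true: simple-key
+//	checkKey("t61@vendor") // true: multi-tenant-key (tenant-id "t61", system-id "vendor")
+//	checkKey("1abc")       // false: simple-key must start with lcalpha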
+
+func newMember(key, value string) (member, error) {
+	if !checkKey(key) {
+		return member{}, errInvalidKey
+	}
+	if !checkValue(value) {
+		return member{}, errInvalidValue
+	}
+	return member{Key: key, Value: value}, nil
+}
+
+func parseMember(m string) (member, error) {
+	key, val, ok := strings.Cut(m, memberDelimiter)
+	if !ok {
+		return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
+	}
+	key = strings.TrimLeft(key, " \t")
+	val = strings.TrimRight(val, " \t")
+	result, e := newMember(key, val)
+	if e != nil {
+		return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
+	}
+	return result, nil
+}
+
+// String encodes member into a string compliant with the W3C Trace Context
+// specification.
+func (m member) String() string {
+	return m.Key + "=" + m.Value
+}
+
+// TraceState provides additional vendor-specific trace identification
+// information across different distributed tracing systems. It represents an
+// immutable list consisting of key/value pairs, each pair is referred to as a
+// list-member.
+//
+// TraceState conforms to the W3C Trace Context specification
+// (https://www.w3.org/TR/trace-context-1). All operations that create or copy
+// a TraceState do so by validating all input and will only produce TraceState
+// that conform to the specification. Specifically, this means that all
+// list-member's key/value pairs are valid, no duplicate list-members exist,
+// and the maximum number of list-members (32) is not exceeded.
+type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState`
+	// list is the members in order.
+	list []member
+}
+
+var _ json.Marshaler = TraceState{}
+
+// ParseTraceState attempts to decode a TraceState from the passed
+// string. It returns an error if the input is invalid according to the W3C
+// Trace Context specification.
+func ParseTraceState(ts string) (TraceState, error) {
+	if ts == "" {
+		return TraceState{}, nil
+	}
+
+	wrapErr := func(err error) error {
+		return fmt.Errorf("failed to parse tracestate: %w", err)
+	}
+
+	var members []member
+	found := make(map[string]struct{})
+	for ts != "" {
+		var memberStr string
+		memberStr, ts, _ = strings.Cut(ts, listDelimiters)
+		if len(memberStr) == 0 {
+			continue
+		}
+
+		m, err := parseMember(memberStr)
+		if err != nil {
+			return TraceState{}, wrapErr(err)
+		}
+
+		if _, ok := found[m.Key]; ok {
+			return TraceState{}, wrapErr(errDuplicate)
+		}
+		found[m.Key] = struct{}{}
+
+		members = append(members, m)
+		if n := len(members); n > maxListMembers {
+			return TraceState{}, wrapErr(errMemberNumber)
+		}
+	}
+
+	return TraceState{list: members}, nil
+}
+
+// MarshalJSON marshals the TraceState into JSON.
+func (ts TraceState) MarshalJSON() ([]byte, error) {
+	return json.Marshal(ts.String())
+}
+
+// String encodes the TraceState into a string compliant with the W3C
+// Trace Context specification. The returned string will be invalid if the
+// TraceState contains any invalid members.
+func (ts TraceState) String() string {
+	if len(ts.list) == 0 {
+		return ""
+	}
+	var n int
+	n += len(ts.list)     // member delimiters: '='
+	n += len(ts.list) - 1 // list delimiters: ','
+	for _, mem := range ts.list {
+		n += len(mem.Key)
+		n += len(mem.Value)
+	}
+
+	var sb strings.Builder
+	sb.Grow(n)
+	_, _ = sb.WriteString(ts.list[0].Key)
+	_ = sb.WriteByte('=')
+	_, _ = sb.WriteString(ts.list[0].Value)
+	for i := 1; i < len(ts.list); i++ {
+		_ = sb.WriteByte(listDelimiters[0])
+		_, _ = sb.WriteString(ts.list[i].Key)
+		_ = sb.WriteByte('=')
+		_, _ = sb.WriteString(ts.list[i].Value)
+	}
+	return sb.String()
+}
+
+// Get returns the value paired with key from the corresponding TraceState
+// list-member if it exists; otherwise an empty string is returned.
+func (ts TraceState) Get(key string) string {
+	for _, member := range ts.list {
+		if member.Key == key {
+			return member.Value
+		}
+	}
+
+	return ""
+}
+
+// Insert adds a new list-member defined by the key/value pair to the
+// TraceState. If a list-member already exists for the given key, that
+// list-member's value is updated. The new or updated list-member is always
+// moved to the beginning of the TraceState as specified by the W3C Trace
+// Context specification.
+//
+// If the key or value is invalid according to the W3C Trace Context
+// specification, an error is returned with the original TraceState.
+//
+// If adding a new list-member means the TraceState would have more members
+// than is allowed, the new list-member will be inserted and the right-most
+// list-member will be dropped in the returned TraceState.
+func (ts TraceState) Insert(key, value string) (TraceState, error) {
+	m, err := newMember(key, value)
+	if err != nil {
+		return ts, err
+	}
+	n := len(ts.list)
+	found := n
+	for i := range ts.list {
+		if ts.list[i].Key == key {
+			found = i
+		}
+	}
+	cTS := TraceState{}
+	if found == n && n < maxListMembers {
+		cTS.list = make([]member, n+1)
+	} else {
+		cTS.list = make([]member, n)
+	}
+	cTS.list[0] = m
+	// When the number of members exceeds capacity, drop the "right-most".
+	copy(cTS.list[1:], ts.list[0:found])
+	if found < n {
+		copy(cTS.list[1+found:], ts.list[found+1:])
+	}
+	return cTS, nil
+}
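+
+// For example (an illustrative sketch): starting from a TraceState holding
+// "a=1,b=2",
+//
+//	ts, _ = ts.Insert("b", "3") // "b=3,a=1": the existing key moves to the front
+//	ts, _ = ts.Insert("c", "4") // "c=4,b=3,a=1": new keys are prepended
+//
+// and once maxListMembers (32) entries exist, each further Insert drops the
+// right-most list-member from the returned copy.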
+
+// Delete returns a copy of the TraceState with the list-member identified by
+// key removed.
+func (ts TraceState) Delete(key string) TraceState {
+	members := make([]member, ts.Len())
+	copy(members, ts.list)
+	for i, member := range ts.list {
+		if member.Key == key {
+			members = append(members[:i], members[i+1:]...)
+			// TraceState should contain no duplicate members.
+			break
+		}
+	}
+	return TraceState{list: members}
+}
+
+// Len returns the number of list-members in the TraceState.
+func (ts TraceState) Len() int {
+	return len(ts.list)
+}
diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh
new file mode 100644
index 0000000000000000000000000000000000000000..dbb61a42279596084e861431c619496717143dbd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/verify_examples.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euo pipefail
+
+cd "$(dirname "$0")"
+TOOLS_DIR=$(pwd)/.tools
+
+if [ -z "${GOPATH}" ] ; then
+	printf "GOPATH is not defined.\n"
+	exit -1
+fi
+
+if [ ! -d "${GOPATH}" ] ; then
+	printf "GOPATH ${GOPATH} is invalid \n"
+	exit -1
+fi
+
+# Pre-requisites
+if ! git diff --quiet; then
+	git status
+	printf "\n\nError: working tree is not clean\n"
+	exit 1
+fi
+
+if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then
+	printf "$(git log -1)"
+	printf "\n\nError: HEAD is not pointing to a tagged version"
+fi
+
+make ${TOOLS_DIR}/gojq
+
+DIR_TMP="${GOPATH}/src/oteltmp/"
+rm -rf $DIR_TMP
+mkdir -p $DIR_TMP
+
+printf "Copy examples to ${DIR_TMP}\n"
+cp -a ./example ${DIR_TMP}
+
+# Update go.mod files
+printf "Update go.mod: rename module and remove replace\n"
+
+PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort)
+
+for dir in $PACKAGE_DIRS; do
+	printf "  Update go.mod for $dir\n"
+	(cd "${DIR_TMP}/${dir}" && \
+	 # replaces is ("mod1" "mod2" …)
+	 replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \
+	 # strip double quotes
+	 replaces=("${replaces[@]%\"}") && \
+	 replaces=("${replaces[@]#\"}") && \
+	 # make an array (-dropreplace=mod1 -dropreplace=mod2 …)
+	 dropreplaces=("${replaces[@]/#/-dropreplace=}") && \
+	 go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \
+	 go mod tidy)
+done
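+
+# For reference (an illustrative note, not output captured from this repo):
+# `go mod edit -json` emits JSON of the form
+#   {"Replace": [{"Old": {"Path": "go.opentelemetry.io/otel"}, "New": {"Path": "../.."}}]}
+# so the gojq filter above prints one quoted module path per line, which the
+# array expansions then unquote and turn into -dropreplace flags.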
+printf "Update done:\n\n"
+
+# Build directories that contain a main package. These directories are
+# different from the directories that contain go.mod files.
+printf "Build examples:\n"
+EXAMPLES=$(./get_main_pkgs.sh ./example)
+for ex in $EXAMPLES; do
+	printf "  Build $ex in ${DIR_TMP}/${ex}\n"
+	(cd "${DIR_TMP}/${ex}" && \
+	 go build .)
+done
+
+# Cleanup
+printf "Remove copied files.\n"
+rm -rf $DIR_TMP
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
new file mode 100644
index 0000000000000000000000000000000000000000..0d998bcaf31c7d4d851da9b2e1b95e6a5a3723de
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+// Version is the current release version of OpenTelemetry in use.
+func Version() string {
+	return "1.23.1"
+}
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..19a73c626adadd29e16bac59c0a1bb3f9ed5492a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -0,0 +1,52 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+module-sets:
+  stable-v1:
+    version: v1.23.1
+    modules:
+      - go.opentelemetry.io/otel
+      - go.opentelemetry.io/otel/bridge/opencensus
+      - go.opentelemetry.io/otel/bridge/opencensus/test
+      - go.opentelemetry.io/otel/bridge/opentracing
+      - go.opentelemetry.io/otel/bridge/opentracing/test
+      - go.opentelemetry.io/otel/example/dice
+      - go.opentelemetry.io/otel/example/namedtracer
+      - go.opentelemetry.io/otel/example/opencensus
+      - go.opentelemetry.io/otel/example/otel-collector
+      - go.opentelemetry.io/otel/example/passthrough
+      - go.opentelemetry.io/otel/example/zipkin
+      - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
+      - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
+      - go.opentelemetry.io/otel/exporters/otlp/otlptrace
+      - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
+      - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
+      - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
+      - go.opentelemetry.io/otel/exporters/stdout/stdouttrace
+      - go.opentelemetry.io/otel/exporters/zipkin
+      - go.opentelemetry.io/otel/metric
+      - go.opentelemetry.io/otel/sdk
+      - go.opentelemetry.io/otel/sdk/metric
+      - go.opentelemetry.io/otel/trace
+  experimental-metrics:
+    version: v0.45.2
+    modules:
+      - go.opentelemetry.io/otel/example/prometheus
+      - go.opentelemetry.io/otel/exporters/prometheus
+  experimental-schema:
+    version: v0.0.7
+    modules:
+      - go.opentelemetry.io/otel/schema
+excluded-modules:
+  - go.opentelemetry.io/otel/internal/tools
diff --git a/vendor/go.opentelemetry.io/proto/otlp/LICENSE b/vendor/go.opentelemetry.io/proto/otlp/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..978efdde9325f9a378e4b93ac6c39da7338bbede
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service.pb.go
@@ -0,0 +1,367 @@
+// Copyright 2020, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v3.21.6
+// source: opentelemetry/proto/collector/logs/v1/logs_service.proto
+
+package v1
+
+import (
+	v1 "go.opentelemetry.io/proto/otlp/logs/v1"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ExportLogsServiceRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// An array of ResourceLogs.
+	// For data coming from a single resource this array will typically contain one
+	// element. Intermediary nodes (such as OpenTelemetry Collector) that receive
+	// data from multiple origins typically batch the data before forwarding further and
+	// in that case this array will contain multiple elements.
+	ResourceLogs []*v1.ResourceLogs `protobuf:"bytes,1,rep,name=resource_logs,json=resourceLogs,proto3" json:"resource_logs,omitempty"`
+}
+
+func (x *ExportLogsServiceRequest) Reset() {
+	*x = ExportLogsServiceRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_collector_logs_v1_logs_service_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExportLogsServiceRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportLogsServiceRequest) ProtoMessage() {}
+
+func (x *ExportLogsServiceRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_collector_logs_v1_logs_service_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportLogsServiceRequest.ProtoReflect.Descriptor instead.
+func (*ExportLogsServiceRequest) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ExportLogsServiceRequest) GetResourceLogs() []*v1.ResourceLogs {
+	if x != nil {
+		return x.ResourceLogs
+	}
+	return nil
+}
+
+type ExportLogsServiceResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The details of a partially successful export request.
+	//
+	// If the request is only partially accepted
+	// (i.e. when the server accepts only parts of the data and rejects the rest)
+	// the server MUST initialize the `partial_success` field and MUST
+	// set the `rejected_<signal>` with the number of items it rejected.
+	//
+	// Servers MAY also make use of the `partial_success` field to convey
+	// warnings/suggestions to senders even when the request was fully accepted.
+	// In such cases, the `rejected_<signal>` MUST have a value of `0` and
+	// the `error_message` MUST be non-empty.
+	//
+	// A `partial_success` message with an empty value (rejected_<signal> = 0 and
+	// `error_message` = "") is equivalent to it not being set/present. Senders
+	// SHOULD interpret it the same way as in the full success case.
+	PartialSuccess *ExportLogsPartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success,omitempty"`
+}
+
+func (x *ExportLogsServiceResponse) Reset() {
+	*x = ExportLogsServiceResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_collector_logs_v1_logs_service_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExportLogsServiceResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportLogsServiceResponse) ProtoMessage() {}
+
+func (x *ExportLogsServiceResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_collector_logs_v1_logs_service_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportLogsServiceResponse.ProtoReflect.Descriptor instead.
+func (*ExportLogsServiceResponse) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ExportLogsServiceResponse) GetPartialSuccess() *ExportLogsPartialSuccess {
+	if x != nil {
+		return x.PartialSuccess
+	}
+	return nil
+}
+
+type ExportLogsPartialSuccess struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The number of rejected log records.
+	//
+	// A `rejected_<signal>` field holding a `0` value indicates that the
+	// request was fully accepted.
+	RejectedLogRecords int64 `protobuf:"varint,1,opt,name=rejected_log_records,json=rejectedLogRecords,proto3" json:"rejected_log_records,omitempty"`
+	// A developer-facing human-readable message in English. It should be used
+	// either to explain why the server rejected parts of the data during a partial
+	// success or to convey warnings/suggestions during a full success. The message
+	// should offer guidance on how users can address such issues.
+	//
+	// error_message is an optional field. An error_message with an empty value
+	// is equivalent to it not being set.
+	ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
+}
+
+func (x *ExportLogsPartialSuccess) Reset() {
+	*x = ExportLogsPartialSuccess{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_collector_logs_v1_logs_service_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExportLogsPartialSuccess) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportLogsPartialSuccess) ProtoMessage() {}
+
+func (x *ExportLogsPartialSuccess) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_collector_logs_v1_logs_service_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportLogsPartialSuccess.ProtoReflect.Descriptor instead.
+func (*ExportLogsPartialSuccess) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ExportLogsPartialSuccess) GetRejectedLogRecords() int64 {
+	if x != nil {
+		return x.RejectedLogRecords
+	}
+	return 0
+}
+
+func (x *ExportLogsPartialSuccess) GetErrorMessage() string {
+	if x != nil {
+		return x.ErrorMessage
+	}
+	return ""
+}
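+
+// An illustrative sketch (not generated code) of how a sender might interpret
+// the partial-success contract described above, assuming resp is an
+// *ExportLogsServiceResponse:
+//
+//	if ps := resp.GetPartialSuccess(); ps != nil {
+//		if ps.GetRejectedLogRecords() > 0 {
+//			// partial success: GetErrorMessage() explains the rejections
+//		} else if ps.GetErrorMessage() != "" {
+//			// full success with a server warning or suggestion
+//		}
+//	}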
+
+var File_opentelemetry_proto_collector_logs_v1_logs_service_proto protoreflect.FileDescriptor
+
+var file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDesc = []byte{
+	0x0a, 0x38, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f,
+	0x6c, 0x6f, 0x67, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6c, 0x6f, 0x67, 0x73, 0x5f, 0x73, 0x65, 0x72,
+	0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x25, 0x6f, 0x70, 0x65, 0x6e,
+	0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+	0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x6c, 0x6f, 0x67, 0x73, 0x2e, 0x76,
+	0x31, 0x1a, 0x26, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79,
+	0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6c, 0x6f, 0x67, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6c,
+	0x6f, 0x67, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x18, 0x45, 0x78, 0x70,
+	0x6f, 0x72, 0x74, 0x4c, 0x6f, 0x67, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65,
+	0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+	0x65, 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f,
+	0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x2e, 0x6c, 0x6f, 0x67, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75,
+	0x72, 0x63, 0x65, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+	0x65, 0x4c, 0x6f, 0x67, 0x73, 0x22, 0x85, 0x01, 0x0a, 0x19, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74,
+	0x4c, 0x6f, 0x67, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+	0x6e, 0x73, 0x65, 0x12, 0x68, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73,
+	0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x6f,
+	0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x6c, 0x6f, 0x67,
+	0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4c, 0x6f, 0x67, 0x73, 0x50,
+	0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x0e, 0x70,
+	0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0x71, 0x0a,
+	0x18, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4c, 0x6f, 0x67, 0x73, 0x50, 0x61, 0x72, 0x74, 0x69,
+	0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x72, 0x65, 0x6a,
+	0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64,
+	0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65,
+	0x64, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65,
+	0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+	0x32, 0x9d, 0x01, 0x0a, 0x0b, 0x4c, 0x6f, 0x67, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+	0x12, 0x8d, 0x01, 0x0a, 0x06, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x3f, 0x2e, 0x6f, 0x70,
+	0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x6c, 0x6f, 0x67, 0x73,
+	0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4c, 0x6f, 0x67, 0x73, 0x53, 0x65,
+	0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x40, 0x2e, 0x6f,
+	0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x6c, 0x6f, 0x67,
+	0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4c, 0x6f, 0x67, 0x73, 0x53,
+	0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+	0x42, 0x98, 0x01, 0x0a, 0x28, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65,
+	0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c,
+	0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x6c, 0x6f, 0x67, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x4c,
+	0x6f, 0x67, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+	0x01, 0x5a, 0x30, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+	0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c,
+	0x70, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x6c, 0x6f, 0x67, 0x73,
+	0x2f, 0x76, 0x31, 0xaa, 0x02, 0x25, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+	0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63,
+	0x74, 0x6f, 0x72, 0x2e, 0x4c, 0x6f, 0x67, 0x73, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x33,
+}
+
+var (
+	file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDescOnce sync.Once
+	file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDescData = file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDesc
+)
+
+func file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDescGZIP() []byte {
+	file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDescOnce.Do(func() {
+		file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDescData)
+	})
+	return file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDescData
+}
+
+var file_opentelemetry_proto_collector_logs_v1_logs_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_opentelemetry_proto_collector_logs_v1_logs_service_proto_goTypes = []interface{}{
+	(*ExportLogsServiceRequest)(nil),  // 0: opentelemetry.proto.collector.logs.v1.ExportLogsServiceRequest
+	(*ExportLogsServiceResponse)(nil), // 1: opentelemetry.proto.collector.logs.v1.ExportLogsServiceResponse
+	(*ExportLogsPartialSuccess)(nil),  // 2: opentelemetry.proto.collector.logs.v1.ExportLogsPartialSuccess
+	(*v1.ResourceLogs)(nil),           // 3: opentelemetry.proto.logs.v1.ResourceLogs
+}
+var file_opentelemetry_proto_collector_logs_v1_logs_service_proto_depIdxs = []int32{
+	3, // 0: opentelemetry.proto.collector.logs.v1.ExportLogsServiceRequest.resource_logs:type_name -> opentelemetry.proto.logs.v1.ResourceLogs
+	2, // 1: opentelemetry.proto.collector.logs.v1.ExportLogsServiceResponse.partial_success:type_name -> opentelemetry.proto.collector.logs.v1.ExportLogsPartialSuccess
+	0, // 2: opentelemetry.proto.collector.logs.v1.LogsService.Export:input_type -> opentelemetry.proto.collector.logs.v1.ExportLogsServiceRequest
+	1, // 3: opentelemetry.proto.collector.logs.v1.LogsService.Export:output_type -> opentelemetry.proto.collector.logs.v1.ExportLogsServiceResponse
+	3, // [3:4] is the sub-list for method output_type
+	2, // [2:3] is the sub-list for method input_type
+	2, // [2:2] is the sub-list for extension type_name
+	2, // [2:2] is the sub-list for extension extendee
+	0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_opentelemetry_proto_collector_logs_v1_logs_service_proto_init() }
+func file_opentelemetry_proto_collector_logs_v1_logs_service_proto_init() {
+	if File_opentelemetry_proto_collector_logs_v1_logs_service_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_opentelemetry_proto_collector_logs_v1_logs_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ExportLogsServiceRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_collector_logs_v1_logs_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ExportLogsServiceResponse); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_collector_logs_v1_logs_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ExportLogsPartialSuccess); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   3,
+			NumExtensions: 0,
+			NumServices:   1,
+		},
+		GoTypes:           file_opentelemetry_proto_collector_logs_v1_logs_service_proto_goTypes,
+		DependencyIndexes: file_opentelemetry_proto_collector_logs_v1_logs_service_proto_depIdxs,
+		MessageInfos:      file_opentelemetry_proto_collector_logs_v1_logs_service_proto_msgTypes,
+	}.Build()
+	File_opentelemetry_proto_collector_logs_v1_logs_service_proto = out.File
+	file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDesc = nil
+	file_opentelemetry_proto_collector_logs_v1_logs_service_proto_goTypes = nil
+	file_opentelemetry_proto_collector_logs_v1_logs_service_proto_depIdxs = nil
+}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service.pb.gw.go b/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service.pb.gw.go
new file mode 100644
index 0000000000000000000000000000000000000000..d34f32d91bf11000ec39dd79f81908dd58f20e14
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service.pb.gw.go
@@ -0,0 +1,171 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: opentelemetry/proto/collector/logs/v1/logs_service.proto
+
+/*
+Package v1 is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package v1
+
+import (
+	"context"
+	"io"
+	"net/http"
+
+	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
+	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+	"google.golang.org/protobuf/proto"
+)
+
+// Suppress "imported and not used" errors
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+var _ = metadata.Join
+
+func request_LogsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client LogsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq ExportLogsServiceRequest
+	var metadata runtime.ServerMetadata
+
+	newReader, berr := utilities.IOReaderFactory(req.Body)
+	if berr != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
+	}
+	if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func local_request_LogsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server LogsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq ExportLogsServiceRequest
+	var metadata runtime.ServerMetadata
+
+	newReader, berr := utilities.IOReaderFactory(req.Body)
+	if berr != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
+	}
+	if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := server.Export(ctx, &protoReq)
+	return msg, metadata, err
+
+}
+
+// RegisterLogsServiceHandlerServer registers the http handlers for service LogsService to "mux".
+// UnaryRPC     :call LogsServiceServer directly.
+// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterLogsServiceHandlerFromEndpoint instead.
+func RegisterLogsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server LogsServiceServer) error {
+
+	mux.Handle("POST", pattern_LogsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		var stream runtime.ServerTransportStream
+		ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.logs.v1.LogsService/Export", runtime.WithHTTPPathPattern("/v1/logs"))
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := local_request_LogsService_Export_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+		md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+		if err != nil {
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_LogsService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	return nil
+}
+
+// RegisterLogsServiceHandlerFromEndpoint is the same as RegisterLogsServiceHandler,
+// but it automatically dials "endpoint" and closes the connection when "ctx" is done.
+func RegisterLogsServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+	conn, err := grpc.Dial(endpoint, opts...)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err != nil {
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+			return
+		}
+		go func() {
+			<-ctx.Done()
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+		}()
+	}()
+
+	return RegisterLogsServiceHandler(ctx, mux, conn)
+}
+
+// RegisterLogsServiceHandler registers the http handlers for service LogsService to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterLogsServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+	return RegisterLogsServiceHandlerClient(ctx, mux, NewLogsServiceClient(conn))
+}
+
+// RegisterLogsServiceHandlerClient registers the http handlers for service LogsService
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "LogsServiceClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LogsServiceClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "LogsServiceClient" to call the correct interceptors.
+func RegisterLogsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client LogsServiceClient) error {
+
+	mux.Handle("POST", pattern_LogsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.logs.v1.LogsService/Export", runtime.WithHTTPPathPattern("/v1/logs"))
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_LogsService_Export_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+		if err != nil {
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_LogsService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	return nil
+}
+
+var (
+	pattern_LogsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "logs"}, ""))
+)
+
+var (
+	forward_LogsService_Export_0 = runtime.ForwardResponseMessage
+)
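+
+// An illustrative sketch (endpoint, port, and credentials are assumptions,
+// not part of this generated package): exposing the "/v1/logs" route in
+// front of a gRPC collector could look like
+//
+//	mux := runtime.NewServeMux()
+//	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
+//	if err := RegisterLogsServiceHandlerFromEndpoint(ctx, mux, "localhost:4317", opts); err == nil {
+//		_ = http.ListenAndServe(":8080", mux) // POST /v1/logs is proxied to LogsService/Export
+//	}
+//
+// with google.golang.org/grpc/credentials/insecure imported alongside the
+// packages already used above.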
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service_grpc.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..e1b7c457c9068748e79cf1300e72bbc2be74b6d6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service_grpc.pb.go
@@ -0,0 +1,109 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.1.0
+// - protoc             v3.21.6
+// source: opentelemetry/proto/collector/logs/v1/logs_service.proto
+
+package v1
+
+import (
+	context "context"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+// LogsServiceClient is the client API for LogsService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type LogsServiceClient interface {
+	// For performance reasons, it is recommended to keep this RPC
+	// alive for the entire life of the application.
+	Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error)
+}
+
+type logsServiceClient struct {
+	cc grpc.ClientConnInterface
+}
+
+func NewLogsServiceClient(cc grpc.ClientConnInterface) LogsServiceClient {
+	return &logsServiceClient{cc}
+}
+
+func (c *logsServiceClient) Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error) {
+	out := new(ExportLogsServiceResponse)
+	err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.logs.v1.LogsService/Export", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// LogsServiceServer is the server API for LogsService service.
+// All implementations must embed UnimplementedLogsServiceServer
+// for forward compatibility
+type LogsServiceServer interface {
+	// For performance reasons, it is recommended to keep this RPC
+	// alive for the entire life of the application.
+	Export(context.Context, *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error)
+	mustEmbedUnimplementedLogsServiceServer()
+}
+
+// UnimplementedLogsServiceServer must be embedded to have forward compatible implementations.
+type UnimplementedLogsServiceServer struct {
+}
+
+func (UnimplementedLogsServiceServer) Export(context.Context, *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
+}
+func (UnimplementedLogsServiceServer) mustEmbedUnimplementedLogsServiceServer() {}
+
+// UnsafeLogsServiceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to LogsServiceServer will
+// result in compilation errors.
+type UnsafeLogsServiceServer interface {
+	mustEmbedUnimplementedLogsServiceServer()
+}
+
+func RegisterLogsServiceServer(s grpc.ServiceRegistrar, srv LogsServiceServer) {
+	s.RegisterService(&LogsService_ServiceDesc, srv)
+}
+
+func _LogsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ExportLogsServiceRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(LogsServiceServer).Export(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/opentelemetry.proto.collector.logs.v1.LogsService/Export",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(LogsServiceServer).Export(ctx, req.(*ExportLogsServiceRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+// LogsService_ServiceDesc is the grpc.ServiceDesc for LogsService service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var LogsService_ServiceDesc = grpc.ServiceDesc{
+	ServiceName: "opentelemetry.proto.collector.logs.v1.LogsService",
+	HandlerType: (*LogsServiceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Export",
+			Handler:    _LogsService_Export_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "opentelemetry/proto/collector/logs/v1/logs_service.proto",
+}
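+
+// An illustrative sketch (the endpoint and dial options are assumptions, and
+// resourceLogs is a hypothetical []*v1.ResourceLogs): exporting a batch of
+// logs with the generated client might look like
+//
+//	conn, _ := grpc.Dial("localhost:4317", grpc.WithTransportCredentials(insecure.NewCredentials()))
+//	client := NewLogsServiceClient(conn)
+//	resp, err := client.Export(ctx, &ExportLogsServiceRequest{ResourceLogs: resourceLogs})
+//	// on success, resp.GetPartialSuccess() reports any rejected log records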
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..8e13d157c2c77c6efb92cab33a50d6de89235121
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.go
@@ -0,0 +1,371 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v3.21.6
+// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto
+
+package v1
+
+import (
+	v1 "go.opentelemetry.io/proto/otlp/metrics/v1"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ExportMetricsServiceRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// An array of ResourceMetrics.
+	// For data coming from a single resource this array will typically contain one
+	// element. Intermediary nodes (such as OpenTelemetry Collector) that receive
+	// data from multiple origins typically batch the data before forwarding further and
+	// in that case this array will contain multiple elements.
+	ResourceMetrics []*v1.ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"`
+}
+
+func (x *ExportMetricsServiceRequest) Reset() {
+	*x = ExportMetricsServiceRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExportMetricsServiceRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportMetricsServiceRequest) ProtoMessage() {}
+
+func (x *ExportMetricsServiceRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportMetricsServiceRequest.ProtoReflect.Descriptor instead.
+func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ExportMetricsServiceRequest) GetResourceMetrics() []*v1.ResourceMetrics {
+	if x != nil {
+		return x.ResourceMetrics
+	}
+	return nil
+}
+
+type ExportMetricsServiceResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The details of a partially successful export request.
+	//
+	// If the request is only partially accepted
+	// (i.e. when the server accepts only parts of the data and rejects the rest)
+	// the server MUST initialize the `partial_success` field and MUST
+	// set the `rejected_<signal>` with the number of items it rejected.
+	//
+	// Servers MAY also make use of the `partial_success` field to convey
+	// warnings/suggestions to senders even when the request was fully accepted.
+	// In such cases, the `rejected_<signal>` MUST have a value of `0` and
+	// the `error_message` MUST be non-empty.
+	//
+	// A `partial_success` message with an empty value (rejected_<signal> = 0 and
+	// `error_message` = "") is equivalent to it not being set/present. Senders
+	// SHOULD interpret it the same way as in the full success case.
+	PartialSuccess *ExportMetricsPartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success,omitempty"`
+}
+
+func (x *ExportMetricsServiceResponse) Reset() {
+	*x = ExportMetricsServiceResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExportMetricsServiceResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportMetricsServiceResponse) ProtoMessage() {}
+
+func (x *ExportMetricsServiceResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportMetricsServiceResponse.ProtoReflect.Descriptor instead.
+func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ExportMetricsServiceResponse) GetPartialSuccess() *ExportMetricsPartialSuccess {
+	if x != nil {
+		return x.PartialSuccess
+	}
+	return nil
+}
+
+type ExportMetricsPartialSuccess struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The number of rejected data points.
+	//
+	// A `rejected_<signal>` field holding a `0` value indicates that the
+	// request was fully accepted.
+	RejectedDataPoints int64 `protobuf:"varint,1,opt,name=rejected_data_points,json=rejectedDataPoints,proto3" json:"rejected_data_points,omitempty"`
+	// A developer-facing human-readable message in English. It should be used
+	// either to explain why the server rejected parts of the data during a partial
+	// success or to convey warnings/suggestions during a full success. The message
+	// should offer guidance on how users can address such issues.
+	//
+	// error_message is an optional field. An error_message with an empty value
+	// is equivalent to it not being set.
+	ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
+}
+
+func (x *ExportMetricsPartialSuccess) Reset() {
+	*x = ExportMetricsPartialSuccess{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExportMetricsPartialSuccess) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportMetricsPartialSuccess) ProtoMessage() {}
+
+func (x *ExportMetricsPartialSuccess) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportMetricsPartialSuccess.ProtoReflect.Descriptor instead.
+func (*ExportMetricsPartialSuccess) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ExportMetricsPartialSuccess) GetRejectedDataPoints() int64 {
+	if x != nil {
+		return x.RejectedDataPoints
+	}
+	return 0
+}
+
+func (x *ExportMetricsPartialSuccess) GetErrorMessage() string {
+	if x != nil {
+		return x.ErrorMessage
+	}
+	return ""
+}
+
+var File_opentelemetry_proto_collector_metrics_v1_metrics_service_proto protoreflect.FileDescriptor
+
+var file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDesc = []byte{
+	0x0a, 0x3e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f,
+	0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69,
+	0x63, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x12, 0x28, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e,
+	0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x2c, 0x6f, 0x70, 0x65, 0x6e,
+	0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
+	0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69,
+	0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x79, 0x0a, 0x1b, 0x45, 0x78, 0x70, 0x6f,
+	0x72, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+	0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75,
+	0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
+	0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72,
+	0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e,
+	0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69,
+	0x63, 0x73, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x72,
+	0x69, 0x63, 0x73, 0x22, 0x8e, 0x01, 0x0a, 0x1c, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65,
+	0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70,
+	0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6e, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f,
+	0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e,
+	0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x6d, 0x65,
+	0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4d,
+	0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63,
+	0x63, 0x65, 0x73, 0x73, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63,
+	0x63, 0x65, 0x73, 0x73, 0x22, 0x74, 0x0a, 0x1b, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65,
+	0x74, 0x72, 0x69, 0x63, 0x73, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63,
+	0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f,
+	0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x03, 0x52, 0x12, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x50,
+	0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d,
+	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72,
+	0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x32, 0xac, 0x01, 0x0a, 0x0e, 0x4d,
+	0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x99, 0x01,
+	0x0a, 0x06, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x45, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74,
+	0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63,
+	0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
+	0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63,
+	0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+	0x46, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e,
+	0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72,
+	0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52,
+	0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0xa4, 0x01, 0x0a, 0x2b, 0x69, 0x6f,
+	0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x6d,
+	0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x13, 0x4d, 0x65, 0x74, 0x72, 0x69,
+	0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+	0x5a, 0x33, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+	0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70,
+	0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69,
+	0x63, 0x73, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x28, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65,
+	0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6c, 0x6c,
+	0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x56, 0x31,
+	0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescOnce sync.Once
+	file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescData = file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDesc
+)
+
+func file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescGZIP() []byte {
+	file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescOnce.Do(func() {
+		file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescData)
+	})
+	return file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescData
+}
+
+var file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_goTypes = []interface{}{
+	(*ExportMetricsServiceRequest)(nil),  // 0: opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest
+	(*ExportMetricsServiceResponse)(nil), // 1: opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse
+	(*ExportMetricsPartialSuccess)(nil),  // 2: opentelemetry.proto.collector.metrics.v1.ExportMetricsPartialSuccess
+	(*v1.ResourceMetrics)(nil),           // 3: opentelemetry.proto.metrics.v1.ResourceMetrics
+}
+var file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_depIdxs = []int32{
+	3, // 0: opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest.resource_metrics:type_name -> opentelemetry.proto.metrics.v1.ResourceMetrics
+	2, // 1: opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse.partial_success:type_name -> opentelemetry.proto.collector.metrics.v1.ExportMetricsPartialSuccess
+	0, // 2: opentelemetry.proto.collector.metrics.v1.MetricsService.Export:input_type -> opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest
+	1, // 3: opentelemetry.proto.collector.metrics.v1.MetricsService.Export:output_type -> opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse
+	3, // [3:4] is the sub-list for method output_type
+	2, // [2:3] is the sub-list for method input_type
+	2, // [2:2] is the sub-list for extension type_name
+	2, // [2:2] is the sub-list for extension extendee
+	0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_init() }
+func file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_init() {
+	if File_opentelemetry_proto_collector_metrics_v1_metrics_service_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ExportMetricsServiceRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ExportMetricsServiceResponse); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ExportMetricsPartialSuccess); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   3,
+			NumExtensions: 0,
+			NumServices:   1,
+		},
+		GoTypes:           file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_goTypes,
+		DependencyIndexes: file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_depIdxs,
+		MessageInfos:      file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes,
+	}.Build()
+	File_opentelemetry_proto_collector_metrics_v1_metrics_service_proto = out.File
+	file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDesc = nil
+	file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_goTypes = nil
+	file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_depIdxs = nil
+}
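
For reference, a minimal sketch of how a caller might act on the `partial_success` semantics documented in the generated messages above — distinguishing outright failure, partial rejection, and a fully accepted request that carries a warning. The client constructor comes from the companion `_grpc.pb.go` file added below; the endpoint (the conventional OTLP/gRPC port 4317) and the insecure local connection are assumptions for illustration, not taken from this diff:

```go
package main

import (
	"context"
	"log"

	colmetricspb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Assumed local endpoint; real deployments would configure address and TLS.
	conn, err := grpc.Dial("localhost:4317", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := colmetricspb.NewMetricsServiceClient(conn)
	resp, err := client.Export(context.Background(), &colmetricspb.ExportMetricsServiceRequest{
		// ResourceMetrics would be populated from collected data.
	})
	if err != nil {
		log.Fatalf("export failed outright: %v", err)
	}
	// Per the doc comment on PartialSuccess: a non-zero rejected count means
	// partial rejection; a zero count with a message is a warning on full success.
	if ps := resp.GetPartialSuccess(); ps != nil {
		if ps.GetRejectedDataPoints() > 0 {
			log.Printf("server rejected %d data points: %s", ps.GetRejectedDataPoints(), ps.GetErrorMessage())
		} else if ps.GetErrorMessage() != "" {
			log.Printf("server warning: %s", ps.GetErrorMessage())
		}
	}
}
```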
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.gw.go b/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.gw.go
new file mode 100644
index 0000000000000000000000000000000000000000..c42f1454ed2d507f4d18199e3c639100c64a25ea
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.gw.go
@@ -0,0 +1,171 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto
+
+/*
+Package v1 is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package v1
+
+import (
+	"context"
+	"io"
+	"net/http"
+
+	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
+	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+	"google.golang.org/protobuf/proto"
+)
+
+// Suppress "imported and not used" errors
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+var _ = metadata.Join
+
+func request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client MetricsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq ExportMetricsServiceRequest
+	var metadata runtime.ServerMetadata
+
+	newReader, berr := utilities.IOReaderFactory(req.Body)
+	if berr != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
+	}
+	if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func local_request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server MetricsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq ExportMetricsServiceRequest
+	var metadata runtime.ServerMetadata
+
+	newReader, berr := utilities.IOReaderFactory(req.Body)
+	if berr != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
+	}
+	if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := server.Export(ctx, &protoReq)
+	return msg, metadata, err
+
+}
+
+// RegisterMetricsServiceHandlerServer registers the http handlers for service MetricsService to "mux".
+// UnaryRPC     :call MetricsServiceServer directly.
+// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterMetricsServiceHandlerFromEndpoint instead.
+func RegisterMetricsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server MetricsServiceServer) error {
+
+	mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		var stream runtime.ServerTransportStream
+		ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", runtime.WithHTTPPathPattern("/v1/metrics"))
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := local_request_MetricsService_Export_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+		md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+		if err != nil {
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_MetricsService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	return nil
+}
+
+// RegisterMetricsServiceHandlerFromEndpoint is the same as RegisterMetricsServiceHandler but
+// automatically dials "endpoint" and closes the connection when "ctx" is done.
+func RegisterMetricsServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+	conn, err := grpc.Dial(endpoint, opts...)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err != nil {
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+			return
+		}
+		go func() {
+			<-ctx.Done()
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+		}()
+	}()
+
+	return RegisterMetricsServiceHandler(ctx, mux, conn)
+}
+
+// RegisterMetricsServiceHandler registers the http handlers for service MetricsService to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterMetricsServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+	return RegisterMetricsServiceHandlerClient(ctx, mux, NewMetricsServiceClient(conn))
+}
+
+// RegisterMetricsServiceHandlerClient registers the http handlers for service MetricsService
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "MetricsServiceClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MetricsServiceClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "MetricsServiceClient" to call the correct interceptors.
+func RegisterMetricsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client MetricsServiceClient) error {
+
+	mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", runtime.WithHTTPPathPattern("/v1/metrics"))
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_MetricsService_Export_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+		if err != nil {
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_MetricsService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	return nil
+}
+
+var (
+	pattern_MetricsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "metrics"}, ""))
+)
+
+var (
+	forward_MetricsService_Export_0 = runtime.ForwardResponseMessage
+)
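
The gateway handlers above translate `POST /v1/metrics` JSON bodies into `Export` gRPC calls. A minimal wiring sketch, assuming a local gRPC endpoint and an illustrative HTTP listen address:

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	colmetricspb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
	// Dials the assumed gRPC endpoint and forwards decoded requests to it.
	if err := colmetricspb.RegisterMetricsServiceHandlerFromEndpoint(ctx, mux, "localhost:4317", opts); err != nil {
		log.Fatal(err)
	}
	// POST /v1/metrics on :8080 now proxies to MetricsService/Export.
	log.Fatal(http.ListenAndServe(":8080", mux))
}
```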
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..31d25fc15ac66d38ee3ee7eb8d7e25821bdccb2c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go
@@ -0,0 +1,109 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.1.0
+// - protoc             v3.21.6
+// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto
+
+package v1
+
+import (
+	context "context"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+// MetricsServiceClient is the client API for MetricsService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type MetricsServiceClient interface {
+	// For performance reasons, it is recommended to keep this RPC
+	// alive for the entire life of the application.
+	Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error)
+}
+
+type metricsServiceClient struct {
+	cc grpc.ClientConnInterface
+}
+
+func NewMetricsServiceClient(cc grpc.ClientConnInterface) MetricsServiceClient {
+	return &metricsServiceClient{cc}
+}
+
+func (c *metricsServiceClient) Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error) {
+	out := new(ExportMetricsServiceResponse)
+	err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// MetricsServiceServer is the server API for MetricsService service.
+// All implementations must embed UnimplementedMetricsServiceServer
+// for forward compatibility
+type MetricsServiceServer interface {
+	// For performance reasons, it is recommended to keep this RPC
+	// alive for the entire life of the application.
+	Export(context.Context, *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error)
+	mustEmbedUnimplementedMetricsServiceServer()
+}
+
+// UnimplementedMetricsServiceServer must be embedded to have forward compatible implementations.
+type UnimplementedMetricsServiceServer struct {
+}
+
+func (UnimplementedMetricsServiceServer) Export(context.Context, *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
+}
+func (UnimplementedMetricsServiceServer) mustEmbedUnimplementedMetricsServiceServer() {}
+
+// UnsafeMetricsServiceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to MetricsServiceServer will
+// result in compilation errors.
+type UnsafeMetricsServiceServer interface {
+	mustEmbedUnimplementedMetricsServiceServer()
+}
+
+func RegisterMetricsServiceServer(s grpc.ServiceRegistrar, srv MetricsServiceServer) {
+	s.RegisterService(&MetricsService_ServiceDesc, srv)
+}
+
+func _MetricsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ExportMetricsServiceRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MetricsServiceServer).Export(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MetricsServiceServer).Export(ctx, req.(*ExportMetricsServiceRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+// MetricsService_ServiceDesc is the grpc.ServiceDesc for MetricsService service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy).
+var MetricsService_ServiceDesc = grpc.ServiceDesc{
+	ServiceName: "opentelemetry.proto.collector.metrics.v1.MetricsService",
+	HandlerType: (*MetricsServiceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Export",
+			Handler:    _MetricsService_Export_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "opentelemetry/proto/collector/metrics/v1/metrics_service.proto",
+}
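
A sketch of the server side: as the generated interface requires, implementations embed `UnimplementedMetricsServiceServer` (enforced by `mustEmbedUnimplementedMetricsServiceServer`) so that methods added to the service later don't break existing builds. The `metricsServer` type, listen address, and logging are illustrative:

```go
package main

import (
	"context"
	"log"
	"net"

	colmetricspb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
	"google.golang.org/grpc"
)

type metricsServer struct {
	// Satisfies mustEmbedUnimplementedMetricsServiceServer for forward compatibility.
	colmetricspb.UnimplementedMetricsServiceServer
}

func (s *metricsServer) Export(ctx context.Context, req *colmetricspb.ExportMetricsServiceRequest) (*colmetricspb.ExportMetricsServiceResponse, error) {
	log.Printf("received %d ResourceMetrics", len(req.GetResourceMetrics()))
	// An empty response signals full success; PartialSuccess would be set
	// here if some data points were rejected.
	return &colmetricspb.ExportMetricsServiceResponse{}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":4317") // assumed address
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	colmetricspb.RegisterMetricsServiceServer(s, &metricsServer{})
	log.Fatal(s.Serve(lis))
}
```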
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..c1af04e84e51c6b73b791a05961d6163ab60c32b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go
@@ -0,0 +1,367 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v3.21.6
+// source: opentelemetry/proto/collector/trace/v1/trace_service.proto
+
+package v1
+
+import (
+	v1 "go.opentelemetry.io/proto/otlp/trace/v1"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ExportTraceServiceRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// An array of ResourceSpans.
+	// For data coming from a single resource this array will typically contain one
+	// element. Intermediary nodes (such as OpenTelemetry Collector) that receive
+	// data from multiple origins typically batch the data before forwarding further and
+	// in that case this array will contain multiple elements.
+	ResourceSpans []*v1.ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"`
+}
+
+func (x *ExportTraceServiceRequest) Reset() {
+	*x = ExportTraceServiceRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExportTraceServiceRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportTraceServiceRequest) ProtoMessage() {}
+
+func (x *ExportTraceServiceRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportTraceServiceRequest.ProtoReflect.Descriptor instead.
+func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ExportTraceServiceRequest) GetResourceSpans() []*v1.ResourceSpans {
+	if x != nil {
+		return x.ResourceSpans
+	}
+	return nil
+}
+
+type ExportTraceServiceResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The details of a partially successful export request.
+	//
+	// If the request is only partially accepted
+	// (i.e. when the server accepts only parts of the data and rejects the rest)
+	// the server MUST initialize the `partial_success` field and MUST
+	// set the `rejected_<signal>` with the number of items it rejected.
+	//
+	// Servers MAY also make use of the `partial_success` field to convey
+	// warnings/suggestions to senders even when the request was fully accepted.
+	// In such cases, the `rejected_<signal>` MUST have a value of `0` and
+	// the `error_message` MUST be non-empty.
+	//
+	// A `partial_success` message with an empty value (rejected_<signal> = 0 and
+	// `error_message` = "") is equivalent to it not being set/present. Senders
+	// SHOULD interpret it the same way as in the full success case.
+	PartialSuccess *ExportTracePartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success,omitempty"`
+}
+
+func (x *ExportTraceServiceResponse) Reset() {
+	*x = ExportTraceServiceResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExportTraceServiceResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportTraceServiceResponse) ProtoMessage() {}
+
+func (x *ExportTraceServiceResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportTraceServiceResponse.ProtoReflect.Descriptor instead.
+func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ExportTraceServiceResponse) GetPartialSuccess() *ExportTracePartialSuccess {
+	if x != nil {
+		return x.PartialSuccess
+	}
+	return nil
+}
+
+type ExportTracePartialSuccess struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The number of rejected spans.
+	//
+	// A `rejected_<signal>` field holding a `0` value indicates that the
+	// request was fully accepted.
+	RejectedSpans int64 `protobuf:"varint,1,opt,name=rejected_spans,json=rejectedSpans,proto3" json:"rejected_spans,omitempty"`
+	// A developer-facing human-readable message in English. It should be used
+	// either to explain why the server rejected parts of the data during a partial
+	// success or to convey warnings/suggestions during a full success. The message
+	// should offer guidance on how users can address such issues.
+	//
+	// error_message is an optional field. An error_message with an empty value
+	// is equivalent to it not being set.
+	ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
+}
+
+func (x *ExportTracePartialSuccess) Reset() {
+	*x = ExportTracePartialSuccess{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExportTracePartialSuccess) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportTracePartialSuccess) ProtoMessage() {}
+
+func (x *ExportTracePartialSuccess) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportTracePartialSuccess.ProtoReflect.Descriptor instead.
+func (*ExportTracePartialSuccess) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ExportTracePartialSuccess) GetRejectedSpans() int64 {
+	if x != nil {
+		return x.RejectedSpans
+	}
+	return 0
+}
+
+func (x *ExportTracePartialSuccess) GetErrorMessage() string {
+	if x != nil {
+		return x.ErrorMessage
+	}
+	return ""
+}
+
+var File_opentelemetry_proto_collector_trace_v1_trace_service_proto protoreflect.FileDescriptor
+
+var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc = []byte{
+	0x0a, 0x3a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f,
+	0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73,
+	0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x26, 0x6f, 0x70,
+	0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63,
+	0x65, 0x2e, 0x76, 0x31, 0x1a, 0x28, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+	0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f,
+	0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6f,
+	0x0a, 0x19, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72,
+	0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x52, 0x0a, 0x0e, 0x72,
+	0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+	0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e,
+	0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73,
+	0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22,
+	0x88, 0x01, 0x0a, 0x1a, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53,
+	0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6a,
+	0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73,
+	0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+	0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f,
+	0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31,
+	0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74,
+	0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74,
+	0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0x67, 0x0a, 0x19, 0x45, 0x78,
+	0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c,
+	0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x6a, 0x65, 0x63,
+	0x74, 0x65, 0x64, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52,
+	0x0d, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x23,
+	0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73,
+	0x61, 0x67, 0x65, 0x32, 0xa2, 0x01, 0x0a, 0x0c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72,
+	0x76, 0x69, 0x63, 0x65, 0x12, 0x91, 0x01, 0x0a, 0x06, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x12,
+	0x41, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e,
+	0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54,
+	0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+	0x73, 0x74, 0x1a, 0x42, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+	0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
+	0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f,
+	0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65,
+	0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x9c, 0x01, 0x0a, 0x29, 0x69, 0x6f, 0x2e,
+	0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72,
+	0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72,
+	0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x2e,
+	0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f,
+	0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6c, 0x6c,
+	0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02,
+	0x26, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50,
+	0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x54,
+	0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescOnce sync.Once
+	file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData = file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc
+)
+
+func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP() []byte {
+	file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescOnce.Do(func() {
+		file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData)
+	})
+	return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData
+}
+
+var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes = []interface{}{
+	(*ExportTraceServiceRequest)(nil),  // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest
+	(*ExportTraceServiceResponse)(nil), // 1: opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse
+	(*ExportTracePartialSuccess)(nil),  // 2: opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess
+	(*v1.ResourceSpans)(nil),           // 3: opentelemetry.proto.trace.v1.ResourceSpans
+}
+var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs = []int32{
+	3, // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans
+	2, // 1: opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse.partial_success:type_name -> opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess
+	0, // 2: opentelemetry.proto.collector.trace.v1.TraceService.Export:input_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest
+	1, // 3: opentelemetry.proto.collector.trace.v1.TraceService.Export:output_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse
+	3, // [3:4] is the sub-list for method output_type
+	2, // [2:3] is the sub-list for method input_type
+	2, // [2:2] is the sub-list for extension type_name
+	2, // [2:2] is the sub-list for extension extendee
+	0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() }
+func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() {
+	if File_opentelemetry_proto_collector_trace_v1_trace_service_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ExportTraceServiceRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ExportTraceServiceResponse); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ExportTracePartialSuccess); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   3,
+			NumExtensions: 0,
+			NumServices:   1,
+		},
+		GoTypes:           file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes,
+		DependencyIndexes: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs,
+		MessageInfos:      file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes,
+	}.Build()
+	File_opentelemetry_proto_collector_trace_v1_trace_service_proto = out.File
+	file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc = nil
+	file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes = nil
+	file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs = nil
+}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go
new file mode 100644
index 0000000000000000000000000000000000000000..bb1bd261ed831fb3d04d20dbe8eaa2d729c33636
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go
@@ -0,0 +1,171 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: opentelemetry/proto/collector/trace/v1/trace_service.proto
+
+/*
+Package v1 is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package v1
+
+import (
+	"context"
+	"io"
+	"net/http"
+
+	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
+	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+	"google.golang.org/protobuf/proto"
+)
+
+// Suppress "imported and not used" errors
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+var _ = metadata.Join
+
+func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq ExportTraceServiceRequest
+	var metadata runtime.ServerMetadata
+
+	newReader, berr := utilities.IOReaderFactory(req.Body)
+	if berr != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
+	}
+	if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func local_request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server TraceServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq ExportTraceServiceRequest
+	var metadata runtime.ServerMetadata
+
+	newReader, berr := utilities.IOReaderFactory(req.Body)
+	if berr != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
+	}
+	if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := server.Export(ctx, &protoReq)
+	return msg, metadata, err
+
+}
+
+// RegisterTraceServiceHandlerServer registers the http handlers for service TraceService to "mux".
+// UnaryRPC     :call TraceServiceServer directly.
+// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterTraceServiceHandlerFromEndpoint instead.
+func RegisterTraceServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server TraceServiceServer) error {
+
+	mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		var stream runtime.ServerTransportStream
+		ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/traces"))
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := local_request_TraceService_Export_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+		md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+		if err != nil {
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_TraceService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	return nil
+}
+
+// RegisterTraceServiceHandlerFromEndpoint is the same as RegisterTraceServiceHandler but
+// automatically dials "endpoint" and closes the connection when "ctx" is done.
+func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+	conn, err := grpc.Dial(endpoint, opts...)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err != nil {
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+			return
+		}
+		go func() {
+			<-ctx.Done()
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+		}()
+	}()
+
+	return RegisterTraceServiceHandler(ctx, mux, conn)
+}
+
+// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+	return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn))
+}
+
+// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "TraceServiceClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "TraceServiceClient" to call the correct interceptors.
+func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error {
+
+	mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/traces"))
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_TraceService_Export_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+		if err != nil {
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_TraceService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	return nil
+}
+
+var (
+	pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "traces"}, ""))
+)
+
+var (
+	forward_TraceService_Export_0 = runtime.ForwardResponseMessage
+)
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..dd1b73f1e99797d2c523d7f67305a25b64cda335
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go
@@ -0,0 +1,109 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.1.0
+// - protoc             v3.21.6
+// source: opentelemetry/proto/collector/trace/v1/trace_service.proto
+
+package v1
+
+import (
+	context "context"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+// TraceServiceClient is the client API for TraceService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type TraceServiceClient interface {
+	// For performance reasons, it is recommended to keep this RPC
+	// alive for the entire life of the application.
+	Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error)
+}
+
+type traceServiceClient struct {
+	cc grpc.ClientConnInterface
+}
+
+func NewTraceServiceClient(cc grpc.ClientConnInterface) TraceServiceClient {
+	return &traceServiceClient{cc}
+}
+
+func (c *traceServiceClient) Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) {
+	out := new(ExportTraceServiceResponse)
+	err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// TraceServiceServer is the server API for TraceService service.
+// All implementations must embed UnimplementedTraceServiceServer
+// for forward compatibility
+type TraceServiceServer interface {
+	// For performance reasons, it is recommended to keep this RPC
+	// alive for the entire life of the application.
+	Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error)
+	mustEmbedUnimplementedTraceServiceServer()
+}
+
+// UnimplementedTraceServiceServer must be embedded to have forward compatible implementations.
+type UnimplementedTraceServiceServer struct {
+}
+
+func (UnimplementedTraceServiceServer) Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
+}
+func (UnimplementedTraceServiceServer) mustEmbedUnimplementedTraceServiceServer() {}
+
+// UnsafeTraceServiceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to TraceServiceServer will
+// result in compilation errors.
+type UnsafeTraceServiceServer interface {
+	mustEmbedUnimplementedTraceServiceServer()
+}
+
+func RegisterTraceServiceServer(s grpc.ServiceRegistrar, srv TraceServiceServer) {
+	s.RegisterService(&TraceService_ServiceDesc, srv)
+}
+
+func _TraceService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ExportTraceServiceRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(TraceServiceServer).Export(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/opentelemetry.proto.collector.trace.v1.TraceService/Export",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(TraceServiceServer).Export(ctx, req.(*ExportTraceServiceRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+// TraceService_ServiceDesc is the grpc.ServiceDesc for TraceService service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy).
+var TraceService_ServiceDesc = grpc.ServiceDesc{
+	ServiceName: "opentelemetry.proto.collector.trace.v1.TraceService",
+	HandlerType: (*TraceServiceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Export",
+			Handler:    _TraceService_Export_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "opentelemetry/proto/collector/trace/v1/trace_service.proto",
+}
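
The trace service mirrors the metrics service, so a parallel server example is omitted. One behavioral note in the generated interfaces is worth illustrating: `Export` is recommended to be kept alive for the entire life of the application. A sketch of a long-lived exporter that dials once, reuses a single client for every export, and checks for rejected spans; the `traceExporter` type and endpoint are assumptions:

```go
package main

import (
	"context"
	"log"

	coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

type traceExporter struct {
	client coltracepb.TraceServiceClient // long-lived, shared across exports
}

func newTraceExporter(endpoint string) (*traceExporter, error) {
	conn, err := grpc.Dial(endpoint, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return nil, err
	}
	return &traceExporter{client: coltracepb.NewTraceServiceClient(conn)}, nil
}

func (e *traceExporter) export(ctx context.Context, req *coltracepb.ExportTraceServiceRequest) error {
	resp, err := e.client.Export(ctx, req)
	if err != nil {
		return err
	}
	if ps := resp.GetPartialSuccess(); ps != nil && ps.GetRejectedSpans() > 0 {
		log.Printf("server rejected %d spans: %s", ps.GetRejectedSpans(), ps.GetErrorMessage())
	}
	return nil
}

func main() {
	exp, err := newTraceExporter("localhost:4317") // assumed endpoint
	if err != nil {
		log.Fatal(err)
	}
	_ = exp.export(context.Background(), &coltracepb.ExportTraceServiceRequest{})
}
```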
diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..852209b097bd123506023ee622b3efefbd3f00f1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go
@@ -0,0 +1,630 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v3.21.6
+// source: opentelemetry/proto/common/v1/common.proto
+
+package v1
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// AnyValue is used to represent any type of attribute value. AnyValue may contain a
+// primitive value such as a string or integer or it may contain an arbitrary nested
+// object containing arrays, key-value lists and primitives.
+type AnyValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The value is one of the listed fields. It is valid for all values to be
+	// unspecified, in which case this AnyValue is considered to be "empty".
+	//
+	// Types that are assignable to Value:
+	//	*AnyValue_StringValue
+	//	*AnyValue_BoolValue
+	//	*AnyValue_IntValue
+	//	*AnyValue_DoubleValue
+	//	*AnyValue_ArrayValue
+	//	*AnyValue_KvlistValue
+	//	*AnyValue_BytesValue
+	Value isAnyValue_Value `protobuf_oneof:"value"`
+}
+
+func (x *AnyValue) Reset() {
+	*x = AnyValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *AnyValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AnyValue) ProtoMessage() {}
+
+func (x *AnyValue) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use AnyValue.ProtoReflect.Descriptor instead.
+func (*AnyValue) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *AnyValue) GetValue() isAnyValue_Value {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (x *AnyValue) GetStringValue() string {
+	if x, ok := x.GetValue().(*AnyValue_StringValue); ok {
+		return x.StringValue
+	}
+	return ""
+}
+
+func (x *AnyValue) GetBoolValue() bool {
+	if x, ok := x.GetValue().(*AnyValue_BoolValue); ok {
+		return x.BoolValue
+	}
+	return false
+}
+
+func (x *AnyValue) GetIntValue() int64 {
+	if x, ok := x.GetValue().(*AnyValue_IntValue); ok {
+		return x.IntValue
+	}
+	return 0
+}
+
+func (x *AnyValue) GetDoubleValue() float64 {
+	if x, ok := x.GetValue().(*AnyValue_DoubleValue); ok {
+		return x.DoubleValue
+	}
+	return 0
+}
+
+func (x *AnyValue) GetArrayValue() *ArrayValue {
+	if x, ok := x.GetValue().(*AnyValue_ArrayValue); ok {
+		return x.ArrayValue
+	}
+	return nil
+}
+
+func (x *AnyValue) GetKvlistValue() *KeyValueList {
+	if x, ok := x.GetValue().(*AnyValue_KvlistValue); ok {
+		return x.KvlistValue
+	}
+	return nil
+}
+
+func (x *AnyValue) GetBytesValue() []byte {
+	if x, ok := x.GetValue().(*AnyValue_BytesValue); ok {
+		return x.BytesValue
+	}
+	return nil
+}
+
+type isAnyValue_Value interface {
+	isAnyValue_Value()
+}
+
+type AnyValue_StringValue struct {
+	StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type AnyValue_BoolValue struct {
+	BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type AnyValue_IntValue struct {
+	IntValue int64 `protobuf:"varint,3,opt,name=int_value,json=intValue,proto3,oneof"`
+}
+
+type AnyValue_DoubleValue struct {
+	DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type AnyValue_ArrayValue struct {
+	ArrayValue *ArrayValue `protobuf:"bytes,5,opt,name=array_value,json=arrayValue,proto3,oneof"`
+}
+
+type AnyValue_KvlistValue struct {
+	KvlistValue *KeyValueList `protobuf:"bytes,6,opt,name=kvlist_value,json=kvlistValue,proto3,oneof"`
+}
+
+type AnyValue_BytesValue struct {
+	BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+func (*AnyValue_StringValue) isAnyValue_Value() {}
+
+func (*AnyValue_BoolValue) isAnyValue_Value() {}
+
+func (*AnyValue_IntValue) isAnyValue_Value() {}
+
+func (*AnyValue_DoubleValue) isAnyValue_Value() {}
+
+func (*AnyValue_ArrayValue) isAnyValue_Value() {}
+
+func (*AnyValue_KvlistValue) isAnyValue_Value() {}
+
+func (*AnyValue_BytesValue) isAnyValue_Value() {}
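
The oneof above is modeled as an unexported marker interface (`isAnyValue_Value`) with one exported wrapper struct per variant: setting a value means choosing a wrapper, and reading one means either calling a typed getter or type-switching on `GetValue()`. A short sketch of both directions:

```go
package main

import (
	"fmt"

	commonpb "go.opentelemetry.io/proto/otlp/common/v1"
)

func main() {
	// Set the oneof by picking the wrapper that matches the variant.
	v := &commonpb.AnyValue{
		Value: &commonpb.AnyValue_StringValue{StringValue: "hello"},
	}

	// Typed getters return the zero value when another variant is set.
	fmt.Println(v.GetStringValue()) // "hello"
	fmt.Println(v.GetIntValue())    // 0

	// Or handle every variant explicitly with a type switch.
	switch val := v.GetValue().(type) {
	case *commonpb.AnyValue_StringValue:
		fmt.Println("string:", val.StringValue)
	case *commonpb.AnyValue_IntValue:
		fmt.Println("int:", val.IntValue)
	case nil:
		fmt.Println("empty AnyValue")
	default:
		fmt.Println("other variant")
	}
}
```
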
+
+// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message
+// since oneof in AnyValue does not allow repeated fields.
+type ArrayValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Array of values. The array may be empty (contain 0 elements).
+	Values []*AnyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+func (x *ArrayValue) Reset() {
+	*x = ArrayValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ArrayValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ArrayValue) ProtoMessage() {}
+
+func (x *ArrayValue) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ArrayValue.ProtoReflect.Descriptor instead.
+func (*ArrayValue) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ArrayValue) GetValues() []*AnyValue {
+	if x != nil {
+		return x.Values
+	}
+	return nil
+}
+
+// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message
+// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need
+// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to
+// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches
+// are semantically equivalent.
+type KeyValueList struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A collection of key-value pairs. The list may be empty (may
+	// contain 0 elements).
+	// The keys MUST be unique (it is not allowed to have more than one
+	// value with the same key).
+	Values []*KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+func (x *KeyValueList) Reset() {
+	*x = KeyValueList{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *KeyValueList) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KeyValueList) ProtoMessage() {}
+
+func (x *KeyValueList) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use KeyValueList.ProtoReflect.Descriptor instead.
+func (*KeyValueList) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *KeyValueList) GetValues() []*KeyValue {
+	if x != nil {
+		return x.Values
+	}
+	return nil
+}
+
+// KeyValue is a key-value pair that is used to store Span attributes, Link
+// attributes, etc.
+type KeyValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Key   string    `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	Value *AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *KeyValue) Reset() {
+	*x = KeyValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *KeyValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KeyValue) ProtoMessage() {}
+
+func (x *KeyValue) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead.
+func (*KeyValue) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *KeyValue) GetKey() string {
+	if x != nil {
+		return x.Key
+	}
+	return ""
+}
+
+func (x *KeyValue) GetValue() *AnyValue {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+// InstrumentationScope is a message representing the instrumentation scope information
+// such as the fully qualified name and version.
+type InstrumentationScope struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// An empty instrumentation scope name means the name is unknown.
+	Name    string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+	// Additional attributes that describe the scope. [Optional].
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attributes             []*KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	DroppedAttributesCount uint32      `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+}
+
+func (x *InstrumentationScope) Reset() {
+	*x = InstrumentationScope{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *InstrumentationScope) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*InstrumentationScope) ProtoMessage() {}
+
+func (x *InstrumentationScope) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use InstrumentationScope.ProtoReflect.Descriptor instead.
+func (*InstrumentationScope) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *InstrumentationScope) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *InstrumentationScope) GetVersion() string {
+	if x != nil {
+		return x.Version
+	}
+	return ""
+}
+
+func (x *InstrumentationScope) GetAttributes() []*KeyValue {
+	if x != nil {
+		return x.Attributes
+	}
+	return nil
+}
+
+func (x *InstrumentationScope) GetDroppedAttributesCount() uint32 {
+	if x != nil {
+		return x.DroppedAttributesCount
+	}
+	return 0
+}
+
+var File_opentelemetry_proto_common_v1_common_proto protoreflect.FileDescriptor
+
+var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{
+	0x0a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f,
+	0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x6f, 0x70,
+	0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xe0, 0x02, 0x0a, 0x08,
+	0x41, 0x6e, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69,
+	0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00,
+	0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a,
+	0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d,
+	0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+	0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a,
+	0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20,
+	0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c,
+	0x75, 0x65, 0x12, 0x4c, 0x0a, 0x0b, 0x61, 0x72, 0x72, 0x61, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+	0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f,
+	0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c,
+	0x75, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65,
+	0x12, 0x50, 0x0a, 0x0c, 0x6b, 0x76, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c,
+	0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d,
+	0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4c,
+	0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x6b, 0x76, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c,
+	0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73,
+	0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4d,
+	0x0a, 0x0a, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3f, 0x0a, 0x06,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f,
+	0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79,
+	0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x4f, 0x0a,
+	0x0c, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3f, 0x0a,
+	0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e,
+	0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65,
+	0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x5b,
+	0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+	0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3d, 0x0a, 0x05,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70,
+	0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, 0x56,
+	0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xc7, 0x01, 0x0a, 0x14,
+	0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53,
+	0x63, 0x6f, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73,
+	0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69,
+	0x6f, 0x6e, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
+	0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c,
+	0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d,
+	0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
+	0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64,
+	0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65,
+	0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64,
+	0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
+	0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+	0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+	0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
+	0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65,
+	0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f,
+	0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+	0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e,
+	0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_opentelemetry_proto_common_v1_common_proto_rawDescOnce sync.Once
+	file_opentelemetry_proto_common_v1_common_proto_rawDescData = file_opentelemetry_proto_common_v1_common_proto_rawDesc
+)
+
+func file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP() []byte {
+	file_opentelemetry_proto_common_v1_common_proto_rawDescOnce.Do(func() {
+		file_opentelemetry_proto_common_v1_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_common_v1_common_proto_rawDescData)
+	})
+	return file_opentelemetry_proto_common_v1_common_proto_rawDescData
+}
+
+var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_opentelemetry_proto_common_v1_common_proto_goTypes = []interface{}{
+	(*AnyValue)(nil),             // 0: opentelemetry.proto.common.v1.AnyValue
+	(*ArrayValue)(nil),           // 1: opentelemetry.proto.common.v1.ArrayValue
+	(*KeyValueList)(nil),         // 2: opentelemetry.proto.common.v1.KeyValueList
+	(*KeyValue)(nil),             // 3: opentelemetry.proto.common.v1.KeyValue
+	(*InstrumentationScope)(nil), // 4: opentelemetry.proto.common.v1.InstrumentationScope
+}
+var file_opentelemetry_proto_common_v1_common_proto_depIdxs = []int32{
+	1, // 0: opentelemetry.proto.common.v1.AnyValue.array_value:type_name -> opentelemetry.proto.common.v1.ArrayValue
+	2, // 1: opentelemetry.proto.common.v1.AnyValue.kvlist_value:type_name -> opentelemetry.proto.common.v1.KeyValueList
+	0, // 2: opentelemetry.proto.common.v1.ArrayValue.values:type_name -> opentelemetry.proto.common.v1.AnyValue
+	3, // 3: opentelemetry.proto.common.v1.KeyValueList.values:type_name -> opentelemetry.proto.common.v1.KeyValue
+	0, // 4: opentelemetry.proto.common.v1.KeyValue.value:type_name -> opentelemetry.proto.common.v1.AnyValue
+	3, // 5: opentelemetry.proto.common.v1.InstrumentationScope.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+	6, // [6:6] is the sub-list for method output_type
+	6, // [6:6] is the sub-list for method input_type
+	6, // [6:6] is the sub-list for extension type_name
+	6, // [6:6] is the sub-list for extension extendee
+	0, // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_opentelemetry_proto_common_v1_common_proto_init() }
+func file_opentelemetry_proto_common_v1_common_proto_init() {
+	if File_opentelemetry_proto_common_v1_common_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_opentelemetry_proto_common_v1_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*AnyValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_common_v1_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ArrayValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_common_v1_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*KeyValueList); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_common_v1_common_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*KeyValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_common_v1_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*InstrumentationScope); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	file_opentelemetry_proto_common_v1_common_proto_msgTypes[0].OneofWrappers = []interface{}{
+		(*AnyValue_StringValue)(nil),
+		(*AnyValue_BoolValue)(nil),
+		(*AnyValue_IntValue)(nil),
+		(*AnyValue_DoubleValue)(nil),
+		(*AnyValue_ArrayValue)(nil),
+		(*AnyValue_KvlistValue)(nil),
+		(*AnyValue_BytesValue)(nil),
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_opentelemetry_proto_common_v1_common_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   5,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_opentelemetry_proto_common_v1_common_proto_goTypes,
+		DependencyIndexes: file_opentelemetry_proto_common_v1_common_proto_depIdxs,
+		MessageInfos:      file_opentelemetry_proto_common_v1_common_proto_msgTypes,
+	}.Build()
+	File_opentelemetry_proto_common_v1_common_proto = out.File
+	file_opentelemetry_proto_common_v1_common_proto_rawDesc = nil
+	file_opentelemetry_proto_common_v1_common_proto_goTypes = nil
+	file_opentelemetry_proto_common_v1_common_proto_depIdxs = nil
+}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go b/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..e0246bb07869782390421b7e74e60a8afc55f396
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go
@@ -0,0 +1,847 @@
+// Copyright 2020, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v3.21.6
+// source: opentelemetry/proto/logs/v1/logs.proto
+
+package v1
+
+import (
+	v11 "go.opentelemetry.io/proto/otlp/common/v1"
+	v1 "go.opentelemetry.io/proto/otlp/resource/v1"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Possible values for LogRecord.SeverityNumber.
+type SeverityNumber int32
+
+const (
+	// UNSPECIFIED is the default SeverityNumber, it MUST NOT be used.
+	SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED SeverityNumber = 0
+	SeverityNumber_SEVERITY_NUMBER_TRACE       SeverityNumber = 1
+	SeverityNumber_SEVERITY_NUMBER_TRACE2      SeverityNumber = 2
+	SeverityNumber_SEVERITY_NUMBER_TRACE3      SeverityNumber = 3
+	SeverityNumber_SEVERITY_NUMBER_TRACE4      SeverityNumber = 4
+	SeverityNumber_SEVERITY_NUMBER_DEBUG       SeverityNumber = 5
+	SeverityNumber_SEVERITY_NUMBER_DEBUG2      SeverityNumber = 6
+	SeverityNumber_SEVERITY_NUMBER_DEBUG3      SeverityNumber = 7
+	SeverityNumber_SEVERITY_NUMBER_DEBUG4      SeverityNumber = 8
+	SeverityNumber_SEVERITY_NUMBER_INFO        SeverityNumber = 9
+	SeverityNumber_SEVERITY_NUMBER_INFO2       SeverityNumber = 10
+	SeverityNumber_SEVERITY_NUMBER_INFO3       SeverityNumber = 11
+	SeverityNumber_SEVERITY_NUMBER_INFO4       SeverityNumber = 12
+	SeverityNumber_SEVERITY_NUMBER_WARN        SeverityNumber = 13
+	SeverityNumber_SEVERITY_NUMBER_WARN2       SeverityNumber = 14
+	SeverityNumber_SEVERITY_NUMBER_WARN3       SeverityNumber = 15
+	SeverityNumber_SEVERITY_NUMBER_WARN4       SeverityNumber = 16
+	SeverityNumber_SEVERITY_NUMBER_ERROR       SeverityNumber = 17
+	SeverityNumber_SEVERITY_NUMBER_ERROR2      SeverityNumber = 18
+	SeverityNumber_SEVERITY_NUMBER_ERROR3      SeverityNumber = 19
+	SeverityNumber_SEVERITY_NUMBER_ERROR4      SeverityNumber = 20
+	SeverityNumber_SEVERITY_NUMBER_FATAL       SeverityNumber = 21
+	SeverityNumber_SEVERITY_NUMBER_FATAL2      SeverityNumber = 22
+	SeverityNumber_SEVERITY_NUMBER_FATAL3      SeverityNumber = 23
+	SeverityNumber_SEVERITY_NUMBER_FATAL4      SeverityNumber = 24
+)
+
+// Enum value maps for SeverityNumber.
+var (
+	SeverityNumber_name = map[int32]string{
+		0:  "SEVERITY_NUMBER_UNSPECIFIED",
+		1:  "SEVERITY_NUMBER_TRACE",
+		2:  "SEVERITY_NUMBER_TRACE2",
+		3:  "SEVERITY_NUMBER_TRACE3",
+		4:  "SEVERITY_NUMBER_TRACE4",
+		5:  "SEVERITY_NUMBER_DEBUG",
+		6:  "SEVERITY_NUMBER_DEBUG2",
+		7:  "SEVERITY_NUMBER_DEBUG3",
+		8:  "SEVERITY_NUMBER_DEBUG4",
+		9:  "SEVERITY_NUMBER_INFO",
+		10: "SEVERITY_NUMBER_INFO2",
+		11: "SEVERITY_NUMBER_INFO3",
+		12: "SEVERITY_NUMBER_INFO4",
+		13: "SEVERITY_NUMBER_WARN",
+		14: "SEVERITY_NUMBER_WARN2",
+		15: "SEVERITY_NUMBER_WARN3",
+		16: "SEVERITY_NUMBER_WARN4",
+		17: "SEVERITY_NUMBER_ERROR",
+		18: "SEVERITY_NUMBER_ERROR2",
+		19: "SEVERITY_NUMBER_ERROR3",
+		20: "SEVERITY_NUMBER_ERROR4",
+		21: "SEVERITY_NUMBER_FATAL",
+		22: "SEVERITY_NUMBER_FATAL2",
+		23: "SEVERITY_NUMBER_FATAL3",
+		24: "SEVERITY_NUMBER_FATAL4",
+	}
+	SeverityNumber_value = map[string]int32{
+		"SEVERITY_NUMBER_UNSPECIFIED": 0,
+		"SEVERITY_NUMBER_TRACE":       1,
+		"SEVERITY_NUMBER_TRACE2":      2,
+		"SEVERITY_NUMBER_TRACE3":      3,
+		"SEVERITY_NUMBER_TRACE4":      4,
+		"SEVERITY_NUMBER_DEBUG":       5,
+		"SEVERITY_NUMBER_DEBUG2":      6,
+		"SEVERITY_NUMBER_DEBUG3":      7,
+		"SEVERITY_NUMBER_DEBUG4":      8,
+		"SEVERITY_NUMBER_INFO":        9,
+		"SEVERITY_NUMBER_INFO2":       10,
+		"SEVERITY_NUMBER_INFO3":       11,
+		"SEVERITY_NUMBER_INFO4":       12,
+		"SEVERITY_NUMBER_WARN":        13,
+		"SEVERITY_NUMBER_WARN2":       14,
+		"SEVERITY_NUMBER_WARN3":       15,
+		"SEVERITY_NUMBER_WARN4":       16,
+		"SEVERITY_NUMBER_ERROR":       17,
+		"SEVERITY_NUMBER_ERROR2":      18,
+		"SEVERITY_NUMBER_ERROR3":      19,
+		"SEVERITY_NUMBER_ERROR4":      20,
+		"SEVERITY_NUMBER_FATAL":       21,
+		"SEVERITY_NUMBER_FATAL2":      22,
+		"SEVERITY_NUMBER_FATAL3":      23,
+		"SEVERITY_NUMBER_FATAL4":      24,
+	}
+)
+
+func (x SeverityNumber) Enum() *SeverityNumber {
+	p := new(SeverityNumber)
+	*p = x
+	return p
+}
+
+func (x SeverityNumber) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SeverityNumber) Descriptor() protoreflect.EnumDescriptor {
+	return file_opentelemetry_proto_logs_v1_logs_proto_enumTypes[0].Descriptor()
+}
+
+func (SeverityNumber) Type() protoreflect.EnumType {
+	return &file_opentelemetry_proto_logs_v1_logs_proto_enumTypes[0]
+}
+
+func (x SeverityNumber) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SeverityNumber.Descriptor instead.
+func (SeverityNumber) EnumDescriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_logs_v1_logs_proto_rawDescGZIP(), []int{0}
+}
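
The constants above allot four consecutive values to each level (TRACE is 1-4, DEBUG 5-8, INFO 9-12, WARN 13-16, ERROR 17-20, FATAL 21-24), so a consumer can collapse a fine-grained number back to its base level with simple range checks. A hedged sketch (`coarseLevel` is our illustrative helper, not part of this package):

```go
package main

import (
	"fmt"

	logspb "go.opentelemetry.io/proto/otlp/logs/v1"
)

// coarseLevel maps a fine-grained SeverityNumber to its base level name,
// relying on the four-values-per-level layout of the enum.
func coarseLevel(n logspb.SeverityNumber) string {
	switch {
	case n <= logspb.SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED:
		return "UNSPECIFIED"
	case n <= logspb.SeverityNumber_SEVERITY_NUMBER_TRACE4:
		return "TRACE"
	case n <= logspb.SeverityNumber_SEVERITY_NUMBER_DEBUG4:
		return "DEBUG"
	case n <= logspb.SeverityNumber_SEVERITY_NUMBER_INFO4:
		return "INFO"
	case n <= logspb.SeverityNumber_SEVERITY_NUMBER_WARN4:
		return "WARN"
	case n <= logspb.SeverityNumber_SEVERITY_NUMBER_ERROR4:
		return "ERROR"
	case n <= logspb.SeverityNumber_SEVERITY_NUMBER_FATAL4:
		return "FATAL"
	default:
		return "UNSPECIFIED"
	}
}

func main() {
	fmt.Println(coarseLevel(logspb.SeverityNumber_SEVERITY_NUMBER_INFO2)) // INFO
}
```
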
+
+// LogRecordFlags represents constants used to interpret the
+// LogRecord.flags field, which is a protobuf 'fixed32' type and is to
+// be used as a bit field. Each non-zero value defined in this enum is
+// a bit mask. To extract the bit field, for example, use an
+// expression like:
+//
+//   (logRecord.flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK)
+//
+type LogRecordFlags int32
+
+const (
+	// The zero value for the enum. Should not be used for comparisons.
+	// Instead use bitwise "and" with the appropriate mask as shown above.
+	LogRecordFlags_LOG_RECORD_FLAGS_DO_NOT_USE LogRecordFlags = 0
+	// Bits 0-7 are used for trace flags.
+	LogRecordFlags_LOG_RECORD_FLAGS_TRACE_FLAGS_MASK LogRecordFlags = 255
+)
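
As the comment notes, the mask constant is for bitwise use, never equality comparison. A small sketch of extracting the W3C trace flags from a record's `flags` field (the sampled bit 0x01 comes from the Trace Context specification, not from this package):

```go
package main

import (
	"fmt"

	logspb "go.opentelemetry.io/proto/otlp/logs/v1"
)

func main() {
	// A record whose low byte carries the W3C "sampled" flag.
	rec := &logspb.LogRecord{Flags: 0x01}

	// Mask off the reserved upper 24 bits before interpreting the flags.
	traceFlags := rec.GetFlags() & uint32(logspb.LogRecordFlags_LOG_RECORD_FLAGS_TRACE_FLAGS_MASK)
	fmt.Printf("trace flags: 0x%02x, sampled: %v\n", traceFlags, traceFlags&0x01 != 0)
}
```
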
+
+// Enum value maps for LogRecordFlags.
+var (
+	LogRecordFlags_name = map[int32]string{
+		0:   "LOG_RECORD_FLAGS_DO_NOT_USE",
+		255: "LOG_RECORD_FLAGS_TRACE_FLAGS_MASK",
+	}
+	LogRecordFlags_value = map[string]int32{
+		"LOG_RECORD_FLAGS_DO_NOT_USE":       0,
+		"LOG_RECORD_FLAGS_TRACE_FLAGS_MASK": 255,
+	}
+)
+
+func (x LogRecordFlags) Enum() *LogRecordFlags {
+	p := new(LogRecordFlags)
+	*p = x
+	return p
+}
+
+func (x LogRecordFlags) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (LogRecordFlags) Descriptor() protoreflect.EnumDescriptor {
+	return file_opentelemetry_proto_logs_v1_logs_proto_enumTypes[1].Descriptor()
+}
+
+func (LogRecordFlags) Type() protoreflect.EnumType {
+	return &file_opentelemetry_proto_logs_v1_logs_proto_enumTypes[1]
+}
+
+func (x LogRecordFlags) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use LogRecordFlags.Descriptor instead.
+func (LogRecordFlags) EnumDescriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_logs_v1_logs_proto_rawDescGZIP(), []int{1}
+}
+
+// LogsData represents the logs data that can be stored in a persistent storage,
+// OR can be embedded by other protocols that transfer OTLP logs data but do not
+// implement the OTLP protocol.
+//
+// The main difference between this message and the collector protocol is that
+// this message will not carry any "control" or "metadata" specific to the
+// OTLP protocol.
+//
+// When new fields are added into this message, the OTLP request MUST be updated
+// as well.
+type LogsData struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// An array of ResourceLogs.
+	// For data coming from a single resource this array will typically contain
+	// one element. Intermediary nodes that receive data from multiple origins
+	// typically batch the data before forwarding further and in that case this
+	// array will contain multiple elements.
+	ResourceLogs []*ResourceLogs `protobuf:"bytes,1,rep,name=resource_logs,json=resourceLogs,proto3" json:"resource_logs,omitempty"`
+}
+
+func (x *LogsData) Reset() {
+	*x = LogsData{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *LogsData) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LogsData) ProtoMessage() {}
+
+func (x *LogsData) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use LogsData.ProtoReflect.Descriptor instead.
+func (*LogsData) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_logs_v1_logs_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *LogsData) GetResourceLogs() []*ResourceLogs {
+	if x != nil {
+		return x.ResourceLogs
+	}
+	return nil
+}
+
+// A collection of ScopeLogs from a Resource.
+type ResourceLogs struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The resource for the logs in this message.
+	// If this field is not set then resource info is unknown.
+	Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
+	// A list of ScopeLogs that originate from a resource.
+	ScopeLogs []*ScopeLogs `protobuf:"bytes,2,rep,name=scope_logs,json=scopeLogs,proto3" json:"scope_logs,omitempty"`
+	// The Schema URL, if known. This is the identifier of the Schema that the resource data
+	// is recorded in. To learn more about Schema URL see
+	// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+	// This schema_url applies to the data in the "resource" field. It does not apply
+	// to the data in the "scope_logs" field which have their own schema_url field.
+	SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
+}
+
+func (x *ResourceLogs) Reset() {
+	*x = ResourceLogs{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ResourceLogs) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceLogs) ProtoMessage() {}
+
+func (x *ResourceLogs) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceLogs.ProtoReflect.Descriptor instead.
+func (*ResourceLogs) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_logs_v1_logs_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ResourceLogs) GetResource() *v1.Resource {
+	if x != nil {
+		return x.Resource
+	}
+	return nil
+}
+
+func (x *ResourceLogs) GetScopeLogs() []*ScopeLogs {
+	if x != nil {
+		return x.ScopeLogs
+	}
+	return nil
+}
+
+func (x *ResourceLogs) GetSchemaUrl() string {
+	if x != nil {
+		return x.SchemaUrl
+	}
+	return ""
+}
+
+// A collection of Logs produced by a Scope.
+type ScopeLogs struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The instrumentation scope information for the logs in this message.
+	// Semantically, when InstrumentationScope isn't set, it is equivalent to
+	// an empty instrumentation scope name (unknown).
+	Scope *v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"`
+	// A list of log records.
+	LogRecords []*LogRecord `protobuf:"bytes,2,rep,name=log_records,json=logRecords,proto3" json:"log_records,omitempty"`
+	// The Schema URL, if known. This is the identifier of the Schema that the log data
+	// is recorded in. To learn more about Schema URL see
+	// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+	// This schema_url applies to all logs in the "logs" field.
+	SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
+}
+
+func (x *ScopeLogs) Reset() {
+	*x = ScopeLogs{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ScopeLogs) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ScopeLogs) ProtoMessage() {}
+
+func (x *ScopeLogs) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ScopeLogs.ProtoReflect.Descriptor instead.
+func (*ScopeLogs) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_logs_v1_logs_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ScopeLogs) GetScope() *v11.InstrumentationScope {
+	if x != nil {
+		return x.Scope
+	}
+	return nil
+}
+
+func (x *ScopeLogs) GetLogRecords() []*LogRecord {
+	if x != nil {
+		return x.LogRecords
+	}
+	return nil
+}
+
+func (x *ScopeLogs) GetSchemaUrl() string {
+	if x != nil {
+		return x.SchemaUrl
+	}
+	return ""
+}
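
LogsData, ResourceLogs, and ScopeLogs nest as resource -> scope -> records, with a schema_url at each of the two outer levels. A minimal sketch assembling one payload (the attribute, scope, and body values are illustrative):

```go
package main

import (
	"fmt"
	"time"

	commonpb "go.opentelemetry.io/proto/otlp/common/v1"
	logspb "go.opentelemetry.io/proto/otlp/logs/v1"
	resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
)

func main() {
	data := &logspb.LogsData{
		ResourceLogs: []*logspb.ResourceLogs{{
			// The emitting entity: one Resource shared by all records below.
			Resource: &resourcepb.Resource{
				Attributes: []*commonpb.KeyValue{{
					Key:   "service.name",
					Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "demo"}},
				}},
			},
			ScopeLogs: []*logspb.ScopeLogs{{
				Scope: &commonpb.InstrumentationScope{Name: "example/logger", Version: "0.1.0"},
				LogRecords: []*logspb.LogRecord{{
					TimeUnixNano: uint64(time.Now().UnixNano()),
					Body:         &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "hello logs"}},
				}},
			}},
		}},
	}
	fmt.Println(len(data.GetResourceLogs()[0].GetScopeLogs()[0].GetLogRecords())) // 1
}
```
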
+
+// A log record according to OpenTelemetry Log Data Model:
+// https://github.com/open-telemetry/oteps/blob/main/text/logs/0097-log-data-model.md
+type LogRecord struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// time_unix_nano is the time when the event occurred.
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+	// Value of 0 indicates unknown or missing timestamp.
+	TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+	// Time when the event was observed by the collection system.
+	// For events that originate in OpenTelemetry (e.g. using OpenTelemetry Logging SDK)
+	// this timestamp is typically set at the generation time and is equal to Timestamp.
+	// For events originating externally and collected by OpenTelemetry (e.g. using
+	// Collector) this is the time when OpenTelemetry's code observed the event measured
+	// by the clock of the OpenTelemetry code. This field MUST be set once the event is
+	// observed by OpenTelemetry.
+	//
+	// When converting OpenTelemetry log data to formats that support only one timestamp,
+	// or when it is received by recipients that support only one timestamp
+	// internally, the following logic is recommended:
+	//   - Use time_unix_nano if it is present, otherwise use observed_time_unix_nano.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+	// Value of 0 indicates unknown or missing timestamp.
+	ObservedTimeUnixNano uint64 `protobuf:"fixed64,11,opt,name=observed_time_unix_nano,json=observedTimeUnixNano,proto3" json:"observed_time_unix_nano,omitempty"`
+	// Numerical value of the severity, normalized to values described in Log Data Model.
+	// [Optional].
+	SeverityNumber SeverityNumber `protobuf:"varint,2,opt,name=severity_number,json=severityNumber,proto3,enum=opentelemetry.proto.logs.v1.SeverityNumber" json:"severity_number,omitempty"`
+	// The severity text (also known as log level). The original string representation as
+	// it is known at the source. [Optional].
+	SeverityText string `protobuf:"bytes,3,opt,name=severity_text,json=severityText,proto3" json:"severity_text,omitempty"`
+	// A value containing the body of the log record. Can be for example a human-readable
+	// string message (including multi-line) describing the event in a free form or it can
+	// be a structured data composed of arrays and maps of other values. [Optional].
+	Body *v11.AnyValue `protobuf:"bytes,5,opt,name=body,proto3" json:"body,omitempty"`
+	// Additional attributes that describe the specific event occurrence. [Optional].
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attributes             []*v11.KeyValue `protobuf:"bytes,6,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	DroppedAttributesCount uint32          `protobuf:"varint,7,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+	// Flags, a bit field. The 8 least significant bits are the trace flags as
+	// defined in the W3C Trace Context specification. The 24 most significant bits
+	// are reserved and must be set to 0. Readers must not assume that they will
+	// be zero and must correctly mask the bits when reading the 8-bit trace flags
+	// (use flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK). [Optional].
+	Flags uint32 `protobuf:"fixed32,8,opt,name=flags,proto3" json:"flags,omitempty"`
+	// A unique identifier for a trace. All logs from the same trace share
+	// the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
+	// of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
+	// is zero-length and thus is also invalid).
+	//
+	// This field is optional.
+	//
+	// The receivers SHOULD assume that the log record is not associated with a
+	// trace if any of the following is true:
+	//   - the field is not present,
+	//   - the field contains an invalid value.
+	TraceId []byte `protobuf:"bytes,9,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
+	// A unique identifier for a span within a trace, assigned when the span
+	// is created. The ID is an 8-byte array. An ID with all zeroes OR of length
+	// other than 8 bytes is considered invalid (empty string in OTLP/JSON
+	// is zero-length and thus is also invalid).
+	//
+	// This field is optional. If the sender specifies a valid span_id then it SHOULD also
+	// specify a valid trace_id.
+	//
+	// The receivers SHOULD assume that the log record is not associated with a
+	// span if any of the following is true:
+	//   - the field is not present,
+	//   - the field contains an invalid value.
+	SpanId []byte `protobuf:"bytes,10,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
+}
+
+func (x *LogRecord) Reset() {
+	*x = LogRecord{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *LogRecord) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LogRecord) ProtoMessage() {}
+
+func (x *LogRecord) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use LogRecord.ProtoReflect.Descriptor instead.
+func (*LogRecord) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_logs_v1_logs_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *LogRecord) GetTimeUnixNano() uint64 {
+	if x != nil {
+		return x.TimeUnixNano
+	}
+	return 0
+}
+
+func (x *LogRecord) GetObservedTimeUnixNano() uint64 {
+	if x != nil {
+		return x.ObservedTimeUnixNano
+	}
+	return 0
+}
+
+func (x *LogRecord) GetSeverityNumber() SeverityNumber {
+	if x != nil {
+		return x.SeverityNumber
+	}
+	return SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED
+}
+
+func (x *LogRecord) GetSeverityText() string {
+	if x != nil {
+		return x.SeverityText
+	}
+	return ""
+}
+
+func (x *LogRecord) GetBody() *v11.AnyValue {
+	if x != nil {
+		return x.Body
+	}
+	return nil
+}
+
+func (x *LogRecord) GetAttributes() []*v11.KeyValue {
+	if x != nil {
+		return x.Attributes
+	}
+	return nil
+}
+
+func (x *LogRecord) GetDroppedAttributesCount() uint32 {
+	if x != nil {
+		return x.DroppedAttributesCount
+	}
+	return 0
+}
+
+func (x *LogRecord) GetFlags() uint32 {
+	if x != nil {
+		return x.Flags
+	}
+	return 0
+}
+
+func (x *LogRecord) GetTraceId() []byte {
+	if x != nil {
+		return x.TraceId
+	}
+	return nil
+}
+
+func (x *LogRecord) GetSpanId() []byte {
+	if x != nil {
+		return x.SpanId
+	}
+	return nil
+}
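
Two of the field comments above are effectively small algorithms: the single-timestamp fallback and the trace/span ID validity rules. A hedged sketch of both as helpers (`effectiveTime` and `hasValidTraceContext` are our names, not part of the package):

```go
package main

import (
	"bytes"
	"fmt"

	logspb "go.opentelemetry.io/proto/otlp/logs/v1"
)

// effectiveTime applies the recommended fallback: prefer time_unix_nano,
// otherwise use observed_time_unix_nano.
func effectiveTime(r *logspb.LogRecord) uint64 {
	if t := r.GetTimeUnixNano(); t != 0 {
		return t
	}
	return r.GetObservedTimeUnixNano()
}

// hasValidTraceContext checks the ID rules: 16 non-zero bytes for the
// trace ID and 8 non-zero bytes for the span ID; anything else means the
// record should not be associated with a trace.
func hasValidTraceContext(r *logspb.LogRecord) bool {
	tid, sid := r.GetTraceId(), r.GetSpanId()
	return len(tid) == 16 && !bytes.Equal(tid, make([]byte, 16)) &&
		len(sid) == 8 && !bytes.Equal(sid, make([]byte, 8))
}

func main() {
	rec := &logspb.LogRecord{ObservedTimeUnixNano: 42}
	fmt.Println(effectiveTime(rec), hasValidTraceContext(rec)) // 42 false
}
```
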
+
+var File_opentelemetry_proto_logs_v1_logs_proto protoreflect.FileDescriptor
+
+var file_opentelemetry_proto_logs_v1_logs_proto_rawDesc = []byte{
+	0x0a, 0x26, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6c, 0x6f, 0x67, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6c, 0x6f,
+	0x67, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1b, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+	0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6c, 0x6f,
+	0x67, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d,
+	0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f,
+	0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79,
+	0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f,
+	0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x22, 0x5a, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4e, 0x0a,
+	0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x01,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d,
+	0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6c, 0x6f, 0x67, 0x73, 0x2e,
+	0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x67, 0x73, 0x52,
+	0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x67, 0x73, 0x22, 0xc3, 0x01,
+	0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x67, 0x73, 0x12, 0x45,
+	0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+	0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e,
+	0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73,
+	0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x5f, 0x6c,
+	0x6f, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+	0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+	0x6c, 0x6f, 0x67, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4c, 0x6f, 0x67,
+	0x73, 0x52, 0x09, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x4c, 0x6f, 0x67, 0x73, 0x12, 0x1d, 0x0a, 0x0a,
+	0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x4a, 0x06, 0x08, 0xe8, 0x07,
+	0x10, 0xe9, 0x07, 0x22, 0xbe, 0x01, 0x0a, 0x09, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4c, 0x6f, 0x67,
+	0x73, 0x12, 0x49, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+	0x32, 0x33, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31,
+	0x2e, 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+	0x53, 0x63, 0x6f, 0x70, 0x65, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x47, 0x0a, 0x0b,
+	0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+	0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72,
+	0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6c, 0x6f, 0x67, 0x73, 0x2e, 0x76, 0x31, 0x2e,
+	0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0a, 0x6c, 0x6f, 0x67, 0x52, 0x65,
+	0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f,
+	0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d,
+	0x61, 0x55, 0x72, 0x6c, 0x22, 0xf3, 0x03, 0x0a, 0x09, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f,
+	0x72, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f,
+	0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65,
+	0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x35, 0x0a, 0x17, 0x6f, 0x62, 0x73, 0x65,
+	0x72, 0x76, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e,
+	0x61, 0x6e, 0x6f, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x06, 0x52, 0x14, 0x6f, 0x62, 0x73, 0x65, 0x72,
+	0x76, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12,
+	0x54, 0x0a, 0x0f, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x6e, 0x75, 0x6d, 0x62,
+	0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74,
+	0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6c,
+	0x6f, 0x67, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x4e,
+	0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x0e, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x4e,
+	0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74,
+	0x79, 0x5f, 0x74, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65,
+	0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x54, 0x65, 0x78, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x62, 0x6f,
+	0x64, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74,
+	0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63,
+	0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, 0x56, 0x61, 0x6c, 0x75,
+	0x65, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69,
+	0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70,
+	0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56,
+	0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
+	0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72,
+	0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01,
+	0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69,
+	0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c,
+	0x61, 0x67, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x07, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73,
+	0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01,
+	0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73,
+	0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70,
+	0x61, 0x6e, 0x49, 0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x2a, 0xc3, 0x05, 0x0a, 0x0e, 0x53,
+	0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1f, 0x0a,
+	0x1b, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52,
+	0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x19,
+	0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45,
+	0x52, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56,
+	0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41,
+	0x43, 0x45, 0x32, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54,
+	0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x33, 0x10,
+	0x03, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55,
+	0x4d, 0x42, 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x34, 0x10, 0x04, 0x12, 0x19, 0x0a,
+	0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52,
+	0x5f, 0x44, 0x45, 0x42, 0x55, 0x47, 0x10, 0x05, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45,
+	0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x42, 0x55,
+	0x47, 0x32, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59,
+	0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x42, 0x55, 0x47, 0x33, 0x10, 0x07,
+	0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d,
+	0x42, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x42, 0x55, 0x47, 0x34, 0x10, 0x08, 0x12, 0x18, 0x0a, 0x14,
+	0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f,
+	0x49, 0x4e, 0x46, 0x4f, 0x10, 0x09, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49,
+	0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x49, 0x4e, 0x46, 0x4f, 0x32, 0x10,
+	0x0a, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55,
+	0x4d, 0x42, 0x45, 0x52, 0x5f, 0x49, 0x4e, 0x46, 0x4f, 0x33, 0x10, 0x0b, 0x12, 0x19, 0x0a, 0x15,
+	0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f,
+	0x49, 0x4e, 0x46, 0x4f, 0x34, 0x10, 0x0c, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x56, 0x45, 0x52,
+	0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x10,
+	0x0d, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55,
+	0x4d, 0x42, 0x45, 0x52, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x32, 0x10, 0x0e, 0x12, 0x19, 0x0a, 0x15,
+	0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f,
+	0x57, 0x41, 0x52, 0x4e, 0x33, 0x10, 0x0f, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52,
+	0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x34,
+	0x10, 0x10, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e,
+	0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x11, 0x12, 0x1a, 0x0a,
+	0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52,
+	0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x32, 0x10, 0x12, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56,
+	0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x45, 0x52, 0x52,
+	0x4f, 0x52, 0x33, 0x10, 0x13, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54,
+	0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x34, 0x10,
+	0x14, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55,
+	0x4d, 0x42, 0x45, 0x52, 0x5f, 0x46, 0x41, 0x54, 0x41, 0x4c, 0x10, 0x15, 0x12, 0x1a, 0x0a, 0x16,
+	0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f,
+	0x46, 0x41, 0x54, 0x41, 0x4c, 0x32, 0x10, 0x16, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45,
+	0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x46, 0x41, 0x54, 0x41,
+	0x4c, 0x33, 0x10, 0x17, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59,
+	0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x46, 0x41, 0x54, 0x41, 0x4c, 0x34, 0x10, 0x18,
+	0x2a, 0x59, 0x0a, 0x0e, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x46, 0x6c, 0x61,
+	0x67, 0x73, 0x12, 0x1f, 0x0a, 0x1b, 0x4c, 0x4f, 0x47, 0x5f, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44,
+	0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x44, 0x4f, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x55, 0x53,
+	0x45, 0x10, 0x00, 0x12, 0x26, 0x0a, 0x21, 0x4c, 0x4f, 0x47, 0x5f, 0x52, 0x45, 0x43, 0x4f, 0x52,
+	0x44, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x46, 0x4c,
+	0x41, 0x47, 0x53, 0x5f, 0x4d, 0x41, 0x53, 0x4b, 0x10, 0xff, 0x01, 0x42, 0x73, 0x0a, 0x1e, 0x69,
+	0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6c, 0x6f, 0x67, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x09, 0x4c,
+	0x6f, 0x67, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x26, 0x67, 0x6f, 0x2e, 0x6f,
+	0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x6c, 0x6f, 0x67, 0x73, 0x2f,
+	0x76, 0x31, 0xaa, 0x02, 0x1b, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+	0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x6f, 0x67, 0x73, 0x2e, 0x56, 0x31,
+	0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_opentelemetry_proto_logs_v1_logs_proto_rawDescOnce sync.Once
+	file_opentelemetry_proto_logs_v1_logs_proto_rawDescData = file_opentelemetry_proto_logs_v1_logs_proto_rawDesc
+)
+
+func file_opentelemetry_proto_logs_v1_logs_proto_rawDescGZIP() []byte {
+	file_opentelemetry_proto_logs_v1_logs_proto_rawDescOnce.Do(func() {
+		file_opentelemetry_proto_logs_v1_logs_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_logs_v1_logs_proto_rawDescData)
+	})
+	return file_opentelemetry_proto_logs_v1_logs_proto_rawDescData
+}
+
+var file_opentelemetry_proto_logs_v1_logs_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_opentelemetry_proto_logs_v1_logs_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_opentelemetry_proto_logs_v1_logs_proto_goTypes = []interface{}{
+	(SeverityNumber)(0),              // 0: opentelemetry.proto.logs.v1.SeverityNumber
+	(LogRecordFlags)(0),              // 1: opentelemetry.proto.logs.v1.LogRecordFlags
+	(*LogsData)(nil),                 // 2: opentelemetry.proto.logs.v1.LogsData
+	(*ResourceLogs)(nil),             // 3: opentelemetry.proto.logs.v1.ResourceLogs
+	(*ScopeLogs)(nil),                // 4: opentelemetry.proto.logs.v1.ScopeLogs
+	(*LogRecord)(nil),                // 5: opentelemetry.proto.logs.v1.LogRecord
+	(*v1.Resource)(nil),              // 6: opentelemetry.proto.resource.v1.Resource
+	(*v11.InstrumentationScope)(nil), // 7: opentelemetry.proto.common.v1.InstrumentationScope
+	(*v11.AnyValue)(nil),             // 8: opentelemetry.proto.common.v1.AnyValue
+	(*v11.KeyValue)(nil),             // 9: opentelemetry.proto.common.v1.KeyValue
+}
+var file_opentelemetry_proto_logs_v1_logs_proto_depIdxs = []int32{
+	3, // 0: opentelemetry.proto.logs.v1.LogsData.resource_logs:type_name -> opentelemetry.proto.logs.v1.ResourceLogs
+	6, // 1: opentelemetry.proto.logs.v1.ResourceLogs.resource:type_name -> opentelemetry.proto.resource.v1.Resource
+	4, // 2: opentelemetry.proto.logs.v1.ResourceLogs.scope_logs:type_name -> opentelemetry.proto.logs.v1.ScopeLogs
+	7, // 3: opentelemetry.proto.logs.v1.ScopeLogs.scope:type_name -> opentelemetry.proto.common.v1.InstrumentationScope
+	5, // 4: opentelemetry.proto.logs.v1.ScopeLogs.log_records:type_name -> opentelemetry.proto.logs.v1.LogRecord
+	0, // 5: opentelemetry.proto.logs.v1.LogRecord.severity_number:type_name -> opentelemetry.proto.logs.v1.SeverityNumber
+	8, // 6: opentelemetry.proto.logs.v1.LogRecord.body:type_name -> opentelemetry.proto.common.v1.AnyValue
+	9, // 7: opentelemetry.proto.logs.v1.LogRecord.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+	8, // [8:8] is the sub-list for method output_type
+	8, // [8:8] is the sub-list for method input_type
+	8, // [8:8] is the sub-list for extension type_name
+	8, // [8:8] is the sub-list for extension extendee
+	0, // [0:8] is the sub-list for field type_name
+}
+
+func init() { file_opentelemetry_proto_logs_v1_logs_proto_init() }
+func file_opentelemetry_proto_logs_v1_logs_proto_init() {
+	if File_opentelemetry_proto_logs_v1_logs_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*LogsData); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ResourceLogs); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ScopeLogs); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*LogRecord); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_opentelemetry_proto_logs_v1_logs_proto_rawDesc,
+			NumEnums:      2,
+			NumMessages:   4,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_opentelemetry_proto_logs_v1_logs_proto_goTypes,
+		DependencyIndexes: file_opentelemetry_proto_logs_v1_logs_proto_depIdxs,
+		EnumInfos:         file_opentelemetry_proto_logs_v1_logs_proto_enumTypes,
+		MessageInfos:      file_opentelemetry_proto_logs_v1_logs_proto_msgTypes,
+	}.Build()
+	File_opentelemetry_proto_logs_v1_logs_proto = out.File
+	file_opentelemetry_proto_logs_v1_logs_proto_rawDesc = nil
+	file_opentelemetry_proto_logs_v1_logs_proto_goTypes = nil
+	file_opentelemetry_proto_logs_v1_logs_proto_depIdxs = nil
+}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go b/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..1ef2656e94d45a89f7b8e61e2d9c2159c16ae4c7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go
@@ -0,0 +1,2495 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v3.21.6
+// source: opentelemetry/proto/metrics/v1/metrics.proto
+
+package v1
+
+import (
+	v11 "go.opentelemetry.io/proto/otlp/common/v1"
+	v1 "go.opentelemetry.io/proto/otlp/resource/v1"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// AggregationTemporality defines how a metric aggregator reports aggregated
+// values. It describes how those values relate to the time interval over
+// which they are aggregated.
+type AggregationTemporality int32
+
+const (
+	// UNSPECIFIED is the default AggregationTemporality, and it MUST NOT be used.
+	AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0
+	// DELTA is an AggregationTemporality for a metric aggregator which reports
+	// changes since last report time. Successive metrics contain aggregation of
+	// values from continuous and non-overlapping intervals.
+	//
+	// The values for a DELTA metric are based only on the time interval
+	// associated with one measurement cycle. There is no dependency on
+	// previous measurements, as there is for CUMULATIVE metrics.
+	//
+	// For example, consider a system measuring the number of requests that
+	// it receives and reports the sum of these requests every second as a
+	// DELTA metric:
+	//
+	//   1. The system starts receiving at time=t_0.
+	//   2. A request is received, the system measures 1 request.
+	//   3. A request is received, the system measures 1 request.
+	//   4. A request is received, the system measures 1 request.
+	//   5. The 1 second collection cycle ends. A metric is exported for the
+	//      number of requests received over the interval of time t_0 to
+	//      t_0+1 with a value of 3.
+	//   6. A request is received, the system measures 1 request.
+	//   7. A request is received, the system measures 1 request.
+	//   8. The 1 second collection cycle ends. A metric is exported for the
+	//      number of requests received over the interval of time t_0+1 to
+	//      t_0+2 with a value of 2.
+	AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1
+	// CUMULATIVE is an AggregationTemporality for a metric aggregator which
+	// reports changes since a fixed start time. This means that current values
+	// of a CUMULATIVE metric depend on all previous measurements since the
+	// start time. Because of this, the sender is required to retain this state
+	// in some form. If this state is lost or invalidated, the CUMULATIVE metric
+	// values MUST be reset and a new fixed start time following the last
+	// reported measurement time sent MUST be used.
+	//
+	// For example, consider a system measuring the number of requests that
+	// it receives and reports the sum of these requests every second as a
+	// CUMULATIVE metric:
+	//
+	//   1. The system starts receiving at time=t_0.
+	//   2. A request is received, the system measures 1 request.
+	//   3. A request is received, the system measures 1 request.
+	//   4. A request is received, the system measures 1 request.
+	//   5. The 1 second collection cycle ends. A metric is exported for the
+	//      number of requests received over the interval of time t_0 to
+	//      t_0+1 with a value of 3.
+	//   6. A request is received, the system measures 1 request.
+	//   7. A request is received, the system measures 1 request.
+	//   8. The 1 second collection cycle ends. A metric is exported for the
+	//      number of requests received over the interval of time t_0 to
+	//      t_0+2 with a value of 5.
+	//   9. The system experiences a fault and loses state.
+	//   10. The system recovers and resumes receiving at time=t_1.
+	//   11. A request is received, the system measures 1 request.
+	//   12. The 1 second collection cycle ends. A metric is exported for the
+	//      number of requests received over the interval of time t_1 to
+	//      t_1+1 with a value of 1.
+	//
+	// Note: Even though using CUMULATIVE to report changes since the last
+	// report time is valid, it is not recommended. Doing so may cause problems
+	// for systems that do not use start_time to determine when the aggregation
+	// value was reset (e.g. Prometheus).
+	AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2
+)
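+
+// Illustrative sketch, not part of the generated API: how a producer might
+// report the same underlying counter under each temporality. The variables
+// (total, lastReported, point) are assumptions for this example only.
+//
+//	// DELTA: export only what changed during this cycle, then advance.
+//	delta := total - lastReported
+//	lastReported = total
+//	point.Value = &NumberDataPoint_AsInt{AsInt: delta}
+//
+//	// CUMULATIVE: export the running total since the fixed start time.
+//	point.Value = &NumberDataPoint_AsInt{AsInt: total}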
+
+// Enum value maps for AggregationTemporality.
+var (
+	AggregationTemporality_name = map[int32]string{
+		0: "AGGREGATION_TEMPORALITY_UNSPECIFIED",
+		1: "AGGREGATION_TEMPORALITY_DELTA",
+		2: "AGGREGATION_TEMPORALITY_CUMULATIVE",
+	}
+	AggregationTemporality_value = map[string]int32{
+		"AGGREGATION_TEMPORALITY_UNSPECIFIED": 0,
+		"AGGREGATION_TEMPORALITY_DELTA":       1,
+		"AGGREGATION_TEMPORALITY_CUMULATIVE":  2,
+	}
+)
+
+func (x AggregationTemporality) Enum() *AggregationTemporality {
+	p := new(AggregationTemporality)
+	*p = x
+	return p
+}
+
+func (x AggregationTemporality) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (AggregationTemporality) Descriptor() protoreflect.EnumDescriptor {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_enumTypes[0].Descriptor()
+}
+
+func (AggregationTemporality) Type() protoreflect.EnumType {
+	return &file_opentelemetry_proto_metrics_v1_metrics_proto_enumTypes[0]
+}
+
+func (x AggregationTemporality) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use AggregationTemporality.Descriptor instead.
+func (AggregationTemporality) EnumDescriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{0}
+}
+
+// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a
+// bit-field representing 32 distinct boolean flags.  Each flag defined in this
+// enum is a bit-mask.  To test the presence of a single flag in the flags of
+// a data point, for example, use an expression like:
+//
+//   (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK
+//
+type DataPointFlags int32
+
+const (
+	// The zero value for the enum. Should not be used for comparisons.
+	// Instead use bitwise "and" with the appropriate mask as shown above.
+	DataPointFlags_DATA_POINT_FLAGS_DO_NOT_USE DataPointFlags = 0
+	// This DataPoint is valid but has no recorded value.  This value
+	// SHOULD be used to reflect explicitly missing data in a series, as
+	// for an equivalent to the Prometheus "staleness marker".
+	DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK DataPointFlags = 1
+)
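+
+// Illustrative sketch, not part of the generated API: testing the staleness
+// flag on a data point with the mask expression described above, assuming a
+// data point variable named point.
+//
+//	mask := uint32(DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK)
+//	if point.GetFlags()&mask == mask {
+//		// Valid point, but no recorded value (staleness marker).
+//	}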
+
+// Enum value maps for DataPointFlags.
+var (
+	DataPointFlags_name = map[int32]string{
+		0: "DATA_POINT_FLAGS_DO_NOT_USE",
+		1: "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK",
+	}
+	DataPointFlags_value = map[string]int32{
+		"DATA_POINT_FLAGS_DO_NOT_USE":             0,
+		"DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK": 1,
+	}
+)
+
+func (x DataPointFlags) Enum() *DataPointFlags {
+	p := new(DataPointFlags)
+	*p = x
+	return p
+}
+
+func (x DataPointFlags) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (DataPointFlags) Descriptor() protoreflect.EnumDescriptor {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_enumTypes[1].Descriptor()
+}
+
+func (DataPointFlags) Type() protoreflect.EnumType {
+	return &file_opentelemetry_proto_metrics_v1_metrics_proto_enumTypes[1]
+}
+
+func (x DataPointFlags) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use DataPointFlags.Descriptor instead.
+func (DataPointFlags) EnumDescriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{1}
+}
+
+// MetricsData represents metrics data that can be stored in persistent
+// storage, OR can be embedded by other protocols that transfer OTLP metrics
+// data but do not implement the OTLP protocol.
+//
+// The main difference between this message and collector protocol is that
+// in this message there will not be any "control" or "metadata" specific to
+// OTLP protocol.
+//
+// When new fields are added into this message, the OTLP request MUST be updated
+// as well.
+type MetricsData struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// An array of ResourceMetrics.
+	// For data coming from a single resource this array will typically contain
+	// one element. Intermediary nodes that receive data from multiple origins
+	// typically batch the data before forwarding it further, and in that case
+	// this array will contain multiple elements.
+	ResourceMetrics []*ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"`
+}
+
+func (x *MetricsData) Reset() {
+	*x = MetricsData{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *MetricsData) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetricsData) ProtoMessage() {}
+
+func (x *MetricsData) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetricsData.ProtoReflect.Descriptor instead.
+func (*MetricsData) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *MetricsData) GetResourceMetrics() []*ResourceMetrics {
+	if x != nil {
+		return x.ResourceMetrics
+	}
+	return nil
+}
+
+// A collection of ScopeMetrics from a Resource.
+type ResourceMetrics struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The resource for the metrics in this message.
+	// If this field is not set then no resource info is known.
+	Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
+	// A list of metrics that originate from a resource.
+	ScopeMetrics []*ScopeMetrics `protobuf:"bytes,2,rep,name=scope_metrics,json=scopeMetrics,proto3" json:"scope_metrics,omitempty"`
+	// The Schema URL, if known. This is the identifier of the Schema that the resource data
+	// is recorded in. To learn more about Schema URL see
+	// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+	// This schema_url applies to the data in the "resource" field. It does not apply
+	// to the data in the "scope_metrics" field which have their own schema_url field.
+	SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
+}
+
+func (x *ResourceMetrics) Reset() {
+	*x = ResourceMetrics{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ResourceMetrics) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceMetrics) ProtoMessage() {}
+
+func (x *ResourceMetrics) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceMetrics.ProtoReflect.Descriptor instead.
+func (*ResourceMetrics) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ResourceMetrics) GetResource() *v1.Resource {
+	if x != nil {
+		return x.Resource
+	}
+	return nil
+}
+
+func (x *ResourceMetrics) GetScopeMetrics() []*ScopeMetrics {
+	if x != nil {
+		return x.ScopeMetrics
+	}
+	return nil
+}
+
+func (x *ResourceMetrics) GetSchemaUrl() string {
+	if x != nil {
+		return x.SchemaUrl
+	}
+	return ""
+}
+
+// A collection of Metrics produced by a Scope.
+type ScopeMetrics struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The instrumentation scope information for the metrics in this message.
+	// Semantically, when InstrumentationScope isn't set, it is equivalent to
+	// an empty instrumentation scope name (unknown).
+	Scope *v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"`
+	// A list of metrics that originate from an instrumentation library.
+	Metrics []*Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"`
+	// The Schema URL, if known. This is the identifier of the Schema that the metric data
+	// is recorded in. To learn more about Schema URL see
+	// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+	// This schema_url applies to all metrics in the "metrics" field.
+	SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
+}
+
+func (x *ScopeMetrics) Reset() {
+	*x = ScopeMetrics{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ScopeMetrics) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ScopeMetrics) ProtoMessage() {}
+
+func (x *ScopeMetrics) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ScopeMetrics.ProtoReflect.Descriptor instead.
+func (*ScopeMetrics) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ScopeMetrics) GetScope() *v11.InstrumentationScope {
+	if x != nil {
+		return x.Scope
+	}
+	return nil
+}
+
+func (x *ScopeMetrics) GetMetrics() []*Metric {
+	if x != nil {
+		return x.Metrics
+	}
+	return nil
+}
+
+func (x *ScopeMetrics) GetSchemaUrl() string {
+	if x != nil {
+		return x.SchemaUrl
+	}
+	return ""
+}
+
+// Defines a Metric which has one or more timeseries.  The following is a
+// brief summary of the Metric data model.  For more details, see:
+//
+//   https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md
+//
+//
+// The data model and relation between entities is shown in the
+// diagram below. Here, "DataPoint" is the term used to refer to any
+// one of the specific data point value types, and "points" is the term used
+// to refer to any one of the lists of points contained in the Metric.
+//
+// - Metric is composed of metadata and data.
+// - The metadata part contains a name, description, and unit.
+// - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
+// - DataPoint contains timestamps, attributes, and one of the possible value type
+//   fields.
+//
+//     Metric
+//  +------------+
+//  |name        |
+//  |description |
+//  |unit        |     +------------------------------------+
+//  |data        |---> |Gauge, Sum, Histogram, Summary, ... |
+//  +------------+     +------------------------------------+
+//
+//    Data [One of Gauge, Sum, Histogram, Summary, ...]
+//  +-----------+
+//  |...        |  // Metadata about the Data.
+//  |points     |--+
+//  +-----------+  |
+//                 |      +---------------------------+
+//                 |      |DataPoint 1                |
+//                 v      |+------+------+   +------+ |
+//              +-----+   ||label |label |...|label | |
+//              |  1  |-->||value1|value2|...|valueN| |
+//              +-----+   |+------+------+   +------+ |
+//              |  .  |   |+-----+                    |
+//              |  .  |   ||value|                    |
+//              |  .  |   |+-----+                    |
+//              |  .  |   +---------------------------+
+//              |  .  |                   .
+//              |  .  |                   .
+//              |  .  |                   .
+//              |  .  |   +---------------------------+
+//              |  .  |   |DataPoint M                |
+//              +-----+   |+------+------+   +------+ |
+//              |  M  |-->||label |label |...|label | |
+//              +-----+   ||value1|value2|...|valueN| |
+//                        |+------+------+   +------+ |
+//                        |+-----+                    |
+//                        ||value|                    |
+//                        |+-----+                    |
+//                        +---------------------------+
+//
+// Each distinct type of DataPoint represents the output of a specific
+// aggregation function: the result of applying the DataPoint's
+// associated function to one or more measurements.
+//
+// All DataPoint types have three common fields:
+// - Attributes includes key-value pairs associated with the data point
+// - TimeUnixNano is required, set to the end time of the aggregation
+// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
+//   having an AggregationTemporality field, as discussed below.
+//
+// Both TimeUnixNano and StartTimeUnixNano values are expressed as
+// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+//
+// # TimeUnixNano
+//
+// This field is required, having consistent interpretation across
+// DataPoint types.  TimeUnixNano is the moment corresponding to when
+// the data point's aggregate value was captured.
+//
+// Data points with the 0 value for TimeUnixNano SHOULD be rejected
+// by consumers.
+//
+// # StartTimeUnixNano
+//
+// StartTimeUnixNano in general allows detecting when a sequence of
+// observations is unbroken.  This field indicates to consumers the
+// start time for points with cumulative and delta
+// AggregationTemporality, and it should be included whenever possible
+// to support correct rate calculation.  Although it may be omitted
+// when the start time is truly unknown, setting StartTimeUnixNano is
+// strongly encouraged.
+type Metric struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// name of the metric.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// description of the metric, which can be used in documentation.
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	// unit in which the metric value is reported. Follows the format
+	// described by http://unitsofmeasure.org/ucum.html.
+	Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"`
+	// Data determines the aggregation type (if any) of the metric, the
+	// reported value type for the data points, and the relationship to
+	// the time interval over which they are reported.
+	//
+	// Types that are assignable to Data:
+	//	*Metric_Gauge
+	//	*Metric_Sum
+	//	*Metric_Histogram
+	//	*Metric_ExponentialHistogram
+	//	*Metric_Summary
+	Data isMetric_Data `protobuf_oneof:"data"`
+}
+
+func (x *Metric) Reset() {
+	*x = Metric{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Metric) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Metric) ProtoMessage() {}
+
+func (x *Metric) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Metric.ProtoReflect.Descriptor instead.
+func (*Metric) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Metric) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *Metric) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Metric) GetUnit() string {
+	if x != nil {
+		return x.Unit
+	}
+	return ""
+}
+
+func (m *Metric) GetData() isMetric_Data {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+func (x *Metric) GetGauge() *Gauge {
+	if x, ok := x.GetData().(*Metric_Gauge); ok {
+		return x.Gauge
+	}
+	return nil
+}
+
+func (x *Metric) GetSum() *Sum {
+	if x, ok := x.GetData().(*Metric_Sum); ok {
+		return x.Sum
+	}
+	return nil
+}
+
+func (x *Metric) GetHistogram() *Histogram {
+	if x, ok := x.GetData().(*Metric_Histogram); ok {
+		return x.Histogram
+	}
+	return nil
+}
+
+func (x *Metric) GetExponentialHistogram() *ExponentialHistogram {
+	if x, ok := x.GetData().(*Metric_ExponentialHistogram); ok {
+		return x.ExponentialHistogram
+	}
+	return nil
+}
+
+func (x *Metric) GetSummary() *Summary {
+	if x, ok := x.GetData().(*Metric_Summary); ok {
+		return x.Summary
+	}
+	return nil
+}
+
+type isMetric_Data interface {
+	isMetric_Data()
+}
+
+type Metric_Gauge struct {
+	Gauge *Gauge `protobuf:"bytes,5,opt,name=gauge,proto3,oneof"`
+}
+
+type Metric_Sum struct {
+	Sum *Sum `protobuf:"bytes,7,opt,name=sum,proto3,oneof"`
+}
+
+type Metric_Histogram struct {
+	Histogram *Histogram `protobuf:"bytes,9,opt,name=histogram,proto3,oneof"`
+}
+
+type Metric_ExponentialHistogram struct {
+	ExponentialHistogram *ExponentialHistogram `protobuf:"bytes,10,opt,name=exponential_histogram,json=exponentialHistogram,proto3,oneof"`
+}
+
+type Metric_Summary struct {
+	Summary *Summary `protobuf:"bytes,11,opt,name=summary,proto3,oneof"`
+}
+
+func (*Metric_Gauge) isMetric_Data() {}
+
+func (*Metric_Sum) isMetric_Data() {}
+
+func (*Metric_Histogram) isMetric_Data() {}
+
+func (*Metric_ExponentialHistogram) isMetric_Data() {}
+
+func (*Metric_Summary) isMetric_Data() {}
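+
+// Illustrative sketch, not part of the generated API: assembling a Metric
+// whose data is a monotonic, cumulative Sum with one integer point. The name,
+// unit, and timestamp values are placeholders.
+//
+//	m := &Metric{
+//		Name: "http.server.request_count",
+//		Unit: "{request}",
+//		Data: &Metric_Sum{Sum: &Sum{
+//			AggregationTemporality: AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
+//			IsMonotonic:            true,
+//			DataPoints: []*NumberDataPoint{{
+//				StartTimeUnixNano: 1_700_000_000_000_000_000,
+//				TimeUnixNano:      1_700_000_060_000_000_000,
+//				Value:             &NumberDataPoint_AsInt{AsInt: 3},
+//			}},
+//		}},
+//	}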
+
+// Gauge represents the type of a scalar metric that always exports the
+// "current value" for every data point. It should be used for an "unknown"
+// aggregation.
+//
+// A Gauge does not support different aggregation temporalities. Given the
+// aggregation is unknown, points cannot be combined using the same
+// aggregation, regardless of aggregation temporalities. Therefore,
+// AggregationTemporality is not included. Consequently, this also means
+// "StartTimeUnixNano" is ignored for all data points.
+type Gauge struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
+}
+
+func (x *Gauge) Reset() {
+	*x = Gauge{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Gauge) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Gauge) ProtoMessage() {}
+
+func (x *Gauge) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Gauge.ProtoReflect.Descriptor instead.
+func (*Gauge) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Gauge) GetDataPoints() []*NumberDataPoint {
+	if x != nil {
+		return x.DataPoints
+	}
+	return nil
+}
+
+// Sum represents the type of a scalar metric that is calculated as a sum of all
+// reported measurements over a time interval.
+type Sum struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
+	// aggregation_temporality describes if the aggregator reports delta changes
+	// since last report time, or cumulative changes since a fixed start time.
+	AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"`
+	// If "true" means that the sum is monotonic.
+	IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"`
+}
+
+func (x *Sum) Reset() {
+	*x = Sum{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Sum) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Sum) ProtoMessage() {}
+
+func (x *Sum) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Sum.ProtoReflect.Descriptor instead.
+func (*Sum) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *Sum) GetDataPoints() []*NumberDataPoint {
+	if x != nil {
+		return x.DataPoints
+	}
+	return nil
+}
+
+func (x *Sum) GetAggregationTemporality() AggregationTemporality {
+	if x != nil {
+		return x.AggregationTemporality
+	}
+	return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
+}
+
+func (x *Sum) GetIsMonotonic() bool {
+	if x != nil {
+		return x.IsMonotonic
+	}
+	return false
+}
+
+// Histogram represents the type of a metric that is calculated by aggregating
+// as a Histogram of all reported measurements over a time interval.
+type Histogram struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	DataPoints []*HistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
+	// aggregation_temporality describes if the aggregator reports delta changes
+	// since last report time, or cumulative changes since a fixed start time.
+	AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"`
+}
+
+func (x *Histogram) Reset() {
+	*x = Histogram{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Histogram) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Histogram) ProtoMessage() {}
+
+func (x *Histogram) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Histogram.ProtoReflect.Descriptor instead.
+func (*Histogram) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *Histogram) GetDataPoints() []*HistogramDataPoint {
+	if x != nil {
+		return x.DataPoints
+	}
+	return nil
+}
+
+func (x *Histogram) GetAggregationTemporality() AggregationTemporality {
+	if x != nil {
+		return x.AggregationTemporality
+	}
+	return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
+}
+
+// ExponentialHistogram represents the type of a metric that is calculated by aggregating
+// as an ExponentialHistogram of all reported double measurements over a time interval.
+type ExponentialHistogram struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	DataPoints []*ExponentialHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
+	// aggregation_temporality describes if the aggregator reports delta changes
+	// since last report time, or cumulative changes since a fixed start time.
+	AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"`
+}
+
+func (x *ExponentialHistogram) Reset() {
+	*x = ExponentialHistogram{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExponentialHistogram) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExponentialHistogram) ProtoMessage() {}
+
+func (x *ExponentialHistogram) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExponentialHistogram.ProtoReflect.Descriptor instead.
+func (*ExponentialHistogram) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *ExponentialHistogram) GetDataPoints() []*ExponentialHistogramDataPoint {
+	if x != nil {
+		return x.DataPoints
+	}
+	return nil
+}
+
+func (x *ExponentialHistogram) GetAggregationTemporality() AggregationTemporality {
+	if x != nil {
+		return x.AggregationTemporality
+	}
+	return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
+}
+
+// Summary metric data are used to convey quantile summaries,
+// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
+// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
+// data type. These data points cannot always be merged in a meaningful way.
+// While they can be useful in some applications, histogram data points are
+// recommended for new applications.
+type Summary struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	DataPoints []*SummaryDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
+}
+
+func (x *Summary) Reset() {
+	*x = Summary{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Summary) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Summary) ProtoMessage() {}
+
+func (x *Summary) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[8]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Summary.ProtoReflect.Descriptor instead.
+func (*Summary) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *Summary) GetDataPoints() []*SummaryDataPoint {
+	if x != nil {
+		return x.DataPoints
+	}
+	return nil
+}
+
+// NumberDataPoint is a single data point in a timeseries that describes the
+// time-varying scalar value of a metric.
+type NumberDataPoint struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The set of key/value pairs that uniquely identify the timeseries from
+	// where this point belongs. The list may be empty (may contain 0 elements).
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attributes []*v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	// StartTimeUnixNano is optional but strongly encouraged; see the
+	// detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
+	// TimeUnixNano is required, see the detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+	// The value itself.  A point is considered invalid when one of the recognized
+	// value fields is not present inside this oneof.
+	//
+	// Types that are assignable to Value:
+	//	*NumberDataPoint_AsDouble
+	//	*NumberDataPoint_AsInt
+	Value isNumberDataPoint_Value `protobuf_oneof:"value"`
+	// (Optional) List of exemplars collected from
+	// measurements that were used to form the data point
+	Exemplars []*Exemplar `protobuf:"bytes,5,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
+	// Flags that apply to this specific data point.  See DataPointFlags
+	// for the available flags and their meaning.
+	Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"`
+}
+
+func (x *NumberDataPoint) Reset() {
+	*x = NumberDataPoint{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[9]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *NumberDataPoint) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NumberDataPoint) ProtoMessage() {}
+
+func (x *NumberDataPoint) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[9]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use NumberDataPoint.ProtoReflect.Descriptor instead.
+func (*NumberDataPoint) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *NumberDataPoint) GetAttributes() []*v11.KeyValue {
+	if x != nil {
+		return x.Attributes
+	}
+	return nil
+}
+
+func (x *NumberDataPoint) GetStartTimeUnixNano() uint64 {
+	if x != nil {
+		return x.StartTimeUnixNano
+	}
+	return 0
+}
+
+func (x *NumberDataPoint) GetTimeUnixNano() uint64 {
+	if x != nil {
+		return x.TimeUnixNano
+	}
+	return 0
+}
+
+func (m *NumberDataPoint) GetValue() isNumberDataPoint_Value {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (x *NumberDataPoint) GetAsDouble() float64 {
+	if x, ok := x.GetValue().(*NumberDataPoint_AsDouble); ok {
+		return x.AsDouble
+	}
+	return 0
+}
+
+func (x *NumberDataPoint) GetAsInt() int64 {
+	if x, ok := x.GetValue().(*NumberDataPoint_AsInt); ok {
+		return x.AsInt
+	}
+	return 0
+}
+
+func (x *NumberDataPoint) GetExemplars() []*Exemplar {
+	if x != nil {
+		return x.Exemplars
+	}
+	return nil
+}
+
+func (x *NumberDataPoint) GetFlags() uint32 {
+	if x != nil {
+		return x.Flags
+	}
+	return 0
+}
+
+type isNumberDataPoint_Value interface {
+	isNumberDataPoint_Value()
+}
+
+type NumberDataPoint_AsDouble struct {
+	AsDouble float64 `protobuf:"fixed64,4,opt,name=as_double,json=asDouble,proto3,oneof"`
+}
+
+type NumberDataPoint_AsInt struct {
+	AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof"`
+}
+
+func (*NumberDataPoint_AsDouble) isNumberDataPoint_Value() {}
+
+func (*NumberDataPoint_AsInt) isNumberDataPoint_Value() {}
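+
+// Illustrative sketch, not part of the generated API: reading a point's value
+// regardless of which oneof field was set, assuming a *NumberDataPoint
+// variable named point and that fmt is imported.
+//
+//	switch v := point.GetValue().(type) {
+//	case *NumberDataPoint_AsDouble:
+//		fmt.Println("double value:", v.AsDouble)
+//	case *NumberDataPoint_AsInt:
+//		fmt.Println("int value:", v.AsInt)
+//	default:
+//		// Neither field is set: the point is considered invalid.
+//	}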
+
+// HistogramDataPoint is a single data point in a timeseries that describes the
+// time-varying values of a Histogram. A Histogram contains summary statistics
+// for a population of values; it may optionally contain the distribution of
+// those values across a set of buckets.
+//
+// If the histogram contains the distribution of values, then both
+// "explicit_bounds" and "bucket counts" fields must be defined.
+// If the histogram does not contain the distribution of values, then both
+// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
+// "sum" are known.
+type HistogramDataPoint struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The set of key/value pairs that uniquely identify the timeseries from
+	// where this point belongs. The list may be empty (may contain 0 elements).
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	// StartTimeUnixNano is optional but strongly encouraged; see the
+	// detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
+	// TimeUnixNano is required, see the detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+	// count is the number of values in the population. Must be non-negative. This
+	// value must be equal to the sum of the "count" fields in buckets if a
+	// histogram is provided.
+	Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
+	// sum of the values in the population. If count is zero then this field
+	// must be zero.
+	//
+	// Note: Sum should only be filled out when measuring non-negative discrete
+	// events, and is assumed to be monotonic over the values of these events.
+	// Negative events *can* be recorded, but sum should not be filled out when
+	// doing so.  This is specifically to enforce compatibility w/ OpenMetrics,
+	// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
+	Sum *float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
+	// bucket_counts is an optional field that contains the count values of the
+	// histogram for each bucket.
+	//
+	// The sum of the bucket_counts must equal the value in the count field.
+	//
+	// The number of elements in the bucket_counts array must be one greater
+	// than the number of elements in the explicit_bounds array.
+	BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
+	// explicit_bounds specifies buckets with explicitly defined bounds for values.
+	//
+	// The boundaries for bucket at index i are:
+	//
+	// (-infinity, explicit_bounds[i]] for i == 0
+	// (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds)
+	// (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)
+	//
+	// The values in the explicit_bounds array must be strictly increasing.
+	//
+	// Histogram buckets are inclusive of their upper boundary, except the last
+	// bucket where the boundary is at infinity. This format is intentionally
+	// compatible with the OpenMetrics histogram definition.
+	ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"`
+	// (Optional) List of exemplars collected from
+	// measurements that were used to form the data point
+	Exemplars []*Exemplar `protobuf:"bytes,8,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
+	// Flags that apply to this specific data point.  See DataPointFlags
+	// for the available flags and their meaning.
+	Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"`
+	// min is the minimum value over (start_time, end_time].
+	Min *float64 `protobuf:"fixed64,11,opt,name=min,proto3,oneof" json:"min,omitempty"`
+	// max is the maximum value over (start_time, end_time].
+	Max *float64 `protobuf:"fixed64,12,opt,name=max,proto3,oneof" json:"max,omitempty"`
+}
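+
+// Illustrative sketch, not part of the generated API: finding the bucket for
+// a value under the explicit_bounds rules above. sort.SearchFloat64s returns
+// the smallest index i with bounds[i] >= value; since buckets include their
+// upper boundary, that index is the bucket index, and len(bounds) addresses
+// the final (+infinity) bucket. dp and value are assumptions for the example.
+//
+//	idx := sort.SearchFloat64s(dp.GetExplicitBounds(), value)
+//	dp.BucketCounts[idx]++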
+
+func (x *HistogramDataPoint) Reset() {
+	*x = HistogramDataPoint{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[10]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *HistogramDataPoint) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HistogramDataPoint) ProtoMessage() {}
+
+func (x *HistogramDataPoint) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[10]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use HistogramDataPoint.ProtoReflect.Descriptor instead.
+func (*HistogramDataPoint) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *HistogramDataPoint) GetAttributes() []*v11.KeyValue {
+	if x != nil {
+		return x.Attributes
+	}
+	return nil
+}
+
+func (x *HistogramDataPoint) GetStartTimeUnixNano() uint64 {
+	if x != nil {
+		return x.StartTimeUnixNano
+	}
+	return 0
+}
+
+func (x *HistogramDataPoint) GetTimeUnixNano() uint64 {
+	if x != nil {
+		return x.TimeUnixNano
+	}
+	return 0
+}
+
+func (x *HistogramDataPoint) GetCount() uint64 {
+	if x != nil {
+		return x.Count
+	}
+	return 0
+}
+
+func (x *HistogramDataPoint) GetSum() float64 {
+	if x != nil && x.Sum != nil {
+		return *x.Sum
+	}
+	return 0
+}
+
+func (x *HistogramDataPoint) GetBucketCounts() []uint64 {
+	if x != nil {
+		return x.BucketCounts
+	}
+	return nil
+}
+
+func (x *HistogramDataPoint) GetExplicitBounds() []float64 {
+	if x != nil {
+		return x.ExplicitBounds
+	}
+	return nil
+}
+
+func (x *HistogramDataPoint) GetExemplars() []*Exemplar {
+	if x != nil {
+		return x.Exemplars
+	}
+	return nil
+}
+
+func (x *HistogramDataPoint) GetFlags() uint32 {
+	if x != nil {
+		return x.Flags
+	}
+	return 0
+}
+
+func (x *HistogramDataPoint) GetMin() float64 {
+	if x != nil && x.Min != nil {
+		return *x.Min
+	}
+	return 0
+}
+
+func (x *HistogramDataPoint) GetMax() float64 {
+	if x != nil && x.Max != nil {
+		return *x.Max
+	}
+	return 0
+}
+
+// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
+// time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram
+// contains summary statistics for a population of values; it may optionally contain the
+// distribution of those values across a set of buckets.
+type ExponentialHistogramDataPoint struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The set of key/value pairs that uniquely identify the timeseries from
+	// where this point belongs. The list may be empty (may contain 0 elements).
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attributes []*v11.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	// StartTimeUnixNano is optional but strongly encouraged; see the
+	// detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
+	// TimeUnixNano is required, see the detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+	// count is the number of values in the population. Must be
+	// non-negative. This value must be equal to the sum of the "bucket_counts"
+	// values in the positive and negative Buckets plus the "zero_count" field.
+	Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
+	// sum of the values in the population. If count is zero then this field
+	// must be zero.
+	//
+	// Note: Sum should only be filled out when measuring non-negative discrete
+	// events, and is assumed to be monotonic over the values of these events.
+	// Negative events *can* be recorded, but sum should not be filled out when
+	// doing so.  This is specifically to enforce compatibility w/ OpenMetrics,
+	// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
+	Sum *float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
+	// scale describes the resolution of the histogram.  Boundaries are
+	// located at powers of the base, where:
+	//
+	//   base = (2^(2^-scale))
+	//
+	// The histogram bucket identified by `index`, a signed integer,
+	// contains values that are greater than (base^index) and
+	// less than or equal to (base^(index+1)).
+	//
+	// The positive and negative ranges of the histogram are expressed
+	// separately.  Negative values are mapped by their absolute value
+	// into the negative range using the same scale as the positive range.
+	//
+	// scale is not restricted by the protocol, as the permissible
+	// values depend on the range of the data.
+	Scale int32 `protobuf:"zigzag32,6,opt,name=scale,proto3" json:"scale,omitempty"`
+	// zero_count is the count of values that are either exactly zero or
+	// within the region considered zero by the instrumentation at the
+	// tolerated degree of precision.  This bucket stores values that
+	// cannot be expressed using the standard exponential formula as
+	// well as values that have been rounded to zero.
+	//
+	// Implementations MAY consider the zero bucket to have probability
+	// mass equal to (zero_count / count).
+	ZeroCount uint64 `protobuf:"fixed64,7,opt,name=zero_count,json=zeroCount,proto3" json:"zero_count,omitempty"`
+	// positive carries the positive range of exponential bucket counts.
+	Positive *ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,8,opt,name=positive,proto3" json:"positive,omitempty"`
+	// negative carries the negative range of exponential bucket counts.
+	Negative *ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,9,opt,name=negative,proto3" json:"negative,omitempty"`
+	// Flags that apply to this specific data point.  See DataPointFlags
+	// for the available flags and their meaning.
+	Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"`
+	// (Optional) List of exemplars collected from
+	// measurements that were used to form the data point
+	Exemplars []*Exemplar `protobuf:"bytes,11,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
+	// min is the minimum value over (start_time, end_time].
+	Min *float64 `protobuf:"fixed64,12,opt,name=min,proto3,oneof" json:"min,omitempty"`
+	// max is the maximum value over (start_time, end_time].
+	Max *float64 `protobuf:"fixed64,13,opt,name=max,proto3,oneof" json:"max,omitempty"`
+	// ZeroThreshold may be optionally set to convey the width of the zero
+	// region, where the zero region is defined as the closed interval
+	// [-ZeroThreshold, ZeroThreshold].
+	// When ZeroThreshold is 0, the zero count bucket stores values that cannot
+	// be expressed using the standard exponential formula as well as values
+	// that have been rounded to zero.
+	ZeroThreshold float64 `protobuf:"fixed64,14,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"`
+}
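+
+// Illustrative sketch, not part of the generated API: deriving the base and
+// the (lower, upper] boundaries of the positive bucket at a given index from
+// the scale, per the formula documented on the scale field. dp and index are
+// assumptions for the example, and math is assumed to be imported.
+//
+//	base := math.Exp2(math.Exp2(-float64(dp.GetScale())))
+//	lower := math.Pow(base, float64(index))   // exclusive lower bound
+//	upper := math.Pow(base, float64(index+1)) // inclusive upper bound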
+
+func (x *ExponentialHistogramDataPoint) Reset() {
+	*x = ExponentialHistogramDataPoint{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[11]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExponentialHistogramDataPoint) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExponentialHistogramDataPoint) ProtoMessage() {}
+
+func (x *ExponentialHistogramDataPoint) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[11]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExponentialHistogramDataPoint.ProtoReflect.Descriptor instead.
+func (*ExponentialHistogramDataPoint) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *ExponentialHistogramDataPoint) GetAttributes() []*v11.KeyValue {
+	if x != nil {
+		return x.Attributes
+	}
+	return nil
+}
+
+func (x *ExponentialHistogramDataPoint) GetStartTimeUnixNano() uint64 {
+	if x != nil {
+		return x.StartTimeUnixNano
+	}
+	return 0
+}
+
+func (x *ExponentialHistogramDataPoint) GetTimeUnixNano() uint64 {
+	if x != nil {
+		return x.TimeUnixNano
+	}
+	return 0
+}
+
+func (x *ExponentialHistogramDataPoint) GetCount() uint64 {
+	if x != nil {
+		return x.Count
+	}
+	return 0
+}
+
+func (x *ExponentialHistogramDataPoint) GetSum() float64 {
+	if x != nil && x.Sum != nil {
+		return *x.Sum
+	}
+	return 0
+}
+
+func (x *ExponentialHistogramDataPoint) GetScale() int32 {
+	if x != nil {
+		return x.Scale
+	}
+	return 0
+}
+
+func (x *ExponentialHistogramDataPoint) GetZeroCount() uint64 {
+	if x != nil {
+		return x.ZeroCount
+	}
+	return 0
+}
+
+func (x *ExponentialHistogramDataPoint) GetPositive() *ExponentialHistogramDataPoint_Buckets {
+	if x != nil {
+		return x.Positive
+	}
+	return nil
+}
+
+func (x *ExponentialHistogramDataPoint) GetNegative() *ExponentialHistogramDataPoint_Buckets {
+	if x != nil {
+		return x.Negative
+	}
+	return nil
+}
+
+func (x *ExponentialHistogramDataPoint) GetFlags() uint32 {
+	if x != nil {
+		return x.Flags
+	}
+	return 0
+}
+
+func (x *ExponentialHistogramDataPoint) GetExemplars() []*Exemplar {
+	if x != nil {
+		return x.Exemplars
+	}
+	return nil
+}
+
+func (x *ExponentialHistogramDataPoint) GetMin() float64 {
+	if x != nil && x.Min != nil {
+		return *x.Min
+	}
+	return 0
+}
+
+func (x *ExponentialHistogramDataPoint) GetMax() float64 {
+	if x != nil && x.Max != nil {
+		return *x.Max
+	}
+	return 0
+}
+
+func (x *ExponentialHistogramDataPoint) GetZeroThreshold() float64 {
+	if x != nil {
+		return x.ZeroThreshold
+	}
+	return 0
+}
+
+// SummaryDataPoint is a single data point in a timeseries that describes the
+// time-varying values of a Summary metric.
+type SummaryDataPoint struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The set of key/value pairs that uniquely identify the timeseries to
+	// which this point belongs. The list may be empty (may contain 0 elements).
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attributes []*v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	// StartTimeUnixNano is optional but strongly encouraged; see the
+	// detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
+	// TimeUnixNano is required, see the detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+	// count is the number of values in the population. Must be non-negative.
+	Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
+	// sum of the values in the population. If count is zero then this field
+	// must be zero.
+	//
+	// Note: Sum should only be filled out when measuring non-negative discrete
+	// events, and is assumed to be monotonic over the values of these events.
+	// Negative events *can* be recorded, but sum should not be filled out when
+	// doing so.  This is specifically to enforce compatibility with OpenMetrics,
+	// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary
+	Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"`
+	// (Optional) list of values at different quantiles of the distribution calculated
+	// from the current snapshot. The quantiles must be strictly increasing.
+	QuantileValues []*SummaryDataPoint_ValueAtQuantile `protobuf:"bytes,6,rep,name=quantile_values,json=quantileValues,proto3" json:"quantile_values,omitempty"`
+	// Flags that apply to this specific data point.  See DataPointFlags
+	// for the available flags and their meaning.
+	Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"`
+}
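+
+// A hand-written illustrative sketch (not generated code): deriving the
+// mean from the count and sum fields described above. The helper name
+// is hypothetical:
+//
+//	func exampleSummaryMean(dp *SummaryDataPoint) (float64, bool) {
+//		if dp.GetCount() == 0 {
+//			return 0, false // sum must also be zero in this case
+//		}
+//		return dp.GetSum() / float64(dp.GetCount()), true
+//	}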
+
+func (x *SummaryDataPoint) Reset() {
+	*x = SummaryDataPoint{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[12]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *SummaryDataPoint) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SummaryDataPoint) ProtoMessage() {}
+
+func (x *SummaryDataPoint) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[12]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use SummaryDataPoint.ProtoReflect.Descriptor instead.
+func (*SummaryDataPoint) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *SummaryDataPoint) GetAttributes() []*v11.KeyValue {
+	if x != nil {
+		return x.Attributes
+	}
+	return nil
+}
+
+func (x *SummaryDataPoint) GetStartTimeUnixNano() uint64 {
+	if x != nil {
+		return x.StartTimeUnixNano
+	}
+	return 0
+}
+
+func (x *SummaryDataPoint) GetTimeUnixNano() uint64 {
+	if x != nil {
+		return x.TimeUnixNano
+	}
+	return 0
+}
+
+func (x *SummaryDataPoint) GetCount() uint64 {
+	if x != nil {
+		return x.Count
+	}
+	return 0
+}
+
+func (x *SummaryDataPoint) GetSum() float64 {
+	if x != nil {
+		return x.Sum
+	}
+	return 0
+}
+
+func (x *SummaryDataPoint) GetQuantileValues() []*SummaryDataPoint_ValueAtQuantile {
+	if x != nil {
+		return x.QuantileValues
+	}
+	return nil
+}
+
+func (x *SummaryDataPoint) GetFlags() uint32 {
+	if x != nil {
+		return x.Flags
+	}
+	return 0
+}
+
+// A representation of an exemplar, which is a sample input measurement.
+// Exemplars also hold information about the environment when the measurement
+// was recorded, for example the span and trace ID of the active span when the
+// exemplar was recorded.
+type Exemplar struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The set of key/value pairs that were filtered out by the aggregator, but
+	// recorded alongside the original measurement. Only key/value pairs that were
+	// filtered out by the aggregator should be included.
+	FilteredAttributes []*v11.KeyValue `protobuf:"bytes,7,rep,name=filtered_attributes,json=filteredAttributes,proto3" json:"filtered_attributes,omitempty"`
+	// time_unix_nano is the exact time when this exemplar was recorded.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	TimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+	// The value of the measurement that was recorded. An exemplar is
+	// considered invalid when one of the recognized value fields is not present
+	// inside this oneof.
+	//
+	// Types that are assignable to Value:
+	//	*Exemplar_AsDouble
+	//	*Exemplar_AsInt
+	Value isExemplar_Value `protobuf_oneof:"value"`
+	// (Optional) Span ID of the exemplar trace.
+	// span_id may be missing if the measurement is not recorded inside a trace
+	// or if the trace is not sampled.
+	SpanId []byte `protobuf:"bytes,4,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
+	// (Optional) Trace ID of the exemplar trace.
+	// trace_id may be missing if the measurement is not recorded inside a trace
+	// or if the trace is not sampled.
+	TraceId []byte `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
+}
+
+func (x *Exemplar) Reset() {
+	*x = Exemplar{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[13]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Exemplar) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Exemplar) ProtoMessage() {}
+
+func (x *Exemplar) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[13]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead.
+func (*Exemplar) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *Exemplar) GetFilteredAttributes() []*v11.KeyValue {
+	if x != nil {
+		return x.FilteredAttributes
+	}
+	return nil
+}
+
+func (x *Exemplar) GetTimeUnixNano() uint64 {
+	if x != nil {
+		return x.TimeUnixNano
+	}
+	return 0
+}
+
+func (m *Exemplar) GetValue() isExemplar_Value {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (x *Exemplar) GetAsDouble() float64 {
+	if x, ok := x.GetValue().(*Exemplar_AsDouble); ok {
+		return x.AsDouble
+	}
+	return 0
+}
+
+func (x *Exemplar) GetAsInt() int64 {
+	if x, ok := x.GetValue().(*Exemplar_AsInt); ok {
+		return x.AsInt
+	}
+	return 0
+}
+
+func (x *Exemplar) GetSpanId() []byte {
+	if x != nil {
+		return x.SpanId
+	}
+	return nil
+}
+
+func (x *Exemplar) GetTraceId() []byte {
+	if x != nil {
+		return x.TraceId
+	}
+	return nil
+}
+
+type isExemplar_Value interface {
+	isExemplar_Value()
+}
+
+type Exemplar_AsDouble struct {
+	AsDouble float64 `protobuf:"fixed64,3,opt,name=as_double,json=asDouble,proto3,oneof"`
+}
+
+type Exemplar_AsInt struct {
+	AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof"`
+}
+
+func (*Exemplar_AsDouble) isExemplar_Value() {}
+
+func (*Exemplar_AsInt) isExemplar_Value() {}
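+
+// An illustrative, hand-written sketch (not generated code) of reading
+// the value oneof above regardless of which wrapper is set; the helper
+// name is hypothetical:
+//
+//	func exampleExemplarValue(e *Exemplar) (float64, bool) {
+//		switch v := e.GetValue().(type) {
+//		case *Exemplar_AsDouble:
+//			return v.AsDouble, true
+//		case *Exemplar_AsInt:
+//			return float64(v.AsInt), true
+//		default: // no recognized value field set; the exemplar is invalid
+//			return 0, false
+//		}
+//	}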
+
+// Buckets are a set of bucket counts, encoded in a contiguous array
+// of counts.
+type ExponentialHistogramDataPoint_Buckets struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Offset is the bucket index of the first entry in the bucket_counts array.
+	//
+	// Note: This uses a varint encoding as a simple form of compression.
+	Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"`
+	// bucket_counts is an array of count values, where bucket_counts[i] carries
+	// the count of the bucket at index (offset+i). bucket_counts[i] is the count
+	// of values greater than base^(offset+i) and less than or equal to
+	// base^(offset+i+1).
+	//
+	// Note: By contrast, the explicit HistogramDataPoint uses
+	// fixed64.  This field is expected to have many buckets,
+	// especially zeros, so uint64 has been selected to ensure
+	// varint encoding.
+	BucketCounts []uint64 `protobuf:"varint,2,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
+}
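+
+// A hand-written illustrative sketch (not generated code) of the
+// offset-based dense encoding documented above, where bucket_counts[i]
+// carries the count at absolute index (offset+i). The helper name is
+// hypothetical:
+//
+//	func exampleCountAt(b *ExponentialHistogramDataPoint_Buckets, index int32) uint64 {
+//		i := index - b.GetOffset()
+//		if i < 0 || int(i) >= len(b.GetBucketCounts()) {
+//			return 0 // outside the populated range
+//		}
+//		return b.GetBucketCounts()[i]
+//	}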
+
+func (x *ExponentialHistogramDataPoint_Buckets) Reset() {
+	*x = ExponentialHistogramDataPoint_Buckets{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[14]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExponentialHistogramDataPoint_Buckets) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExponentialHistogramDataPoint_Buckets) ProtoMessage() {}
+
+func (x *ExponentialHistogramDataPoint_Buckets) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[14]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExponentialHistogramDataPoint_Buckets.ProtoReflect.Descriptor instead.
+func (*ExponentialHistogramDataPoint_Buckets) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{11, 0}
+}
+
+func (x *ExponentialHistogramDataPoint_Buckets) GetOffset() int32 {
+	if x != nil {
+		return x.Offset
+	}
+	return 0
+}
+
+func (x *ExponentialHistogramDataPoint_Buckets) GetBucketCounts() []uint64 {
+	if x != nil {
+		return x.BucketCounts
+	}
+	return nil
+}
+
+// Represents the value at a given quantile of a distribution.
+//
+// To record Min and Max values, the following conventions are used:
+// - The 1.0 quantile is equivalent to the maximum value observed.
+// - The 0.0 quantile is equivalent to the minimum value observed.
+//
+// See the following issue for more context:
+// https://github.com/open-telemetry/opentelemetry-proto/issues/125
+type SummaryDataPoint_ValueAtQuantile struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The quantile of a distribution. Must be in the interval
+	// [0.0, 1.0].
+	Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"`
+	// The value at the given quantile of a distribution.
+	//
+	// Quantile values must NOT be negative.
+	Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
+}
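+
+// A hand-written illustrative sketch (not generated code) of the
+// min/max convention above: the 0.0 and 1.0 quantiles, when present,
+// carry the observed minimum and maximum. The helper name is
+// hypothetical:
+//
+//	func exampleValueAt(dp *SummaryDataPoint, q float64) (float64, bool) {
+//		for _, qv := range dp.GetQuantileValues() {
+//			if qv.GetQuantile() == q {
+//				return qv.GetValue(), true
+//			}
+//		}
+//		return 0, false
+//	}
+//
+// For instance, exampleValueAt(dp, 1.0) yields the maximum value observed.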
+
+func (x *SummaryDataPoint_ValueAtQuantile) Reset() {
+	*x = SummaryDataPoint_ValueAtQuantile{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[15]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *SummaryDataPoint_ValueAtQuantile) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SummaryDataPoint_ValueAtQuantile) ProtoMessage() {}
+
+func (x *SummaryDataPoint_ValueAtQuantile) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[15]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use SummaryDataPoint_ValueAtQuantile.ProtoReflect.Descriptor instead.
+func (*SummaryDataPoint_ValueAtQuantile) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{12, 0}
+}
+
+func (x *SummaryDataPoint_ValueAtQuantile) GetQuantile() float64 {
+	if x != nil {
+		return x.Quantile
+	}
+	return 0
+}
+
+func (x *SummaryDataPoint_ValueAtQuantile) GetValue() float64 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+var File_opentelemetry_proto_metrics_v1_metrics_proto protoreflect.FileDescriptor
+
+var file_opentelemetry_proto_metrics_v1_metrics_proto_rawDesc = []byte{
+	0x0a, 0x2c, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x31,
+	0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1e,
+	0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x2a,
+	0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f,
+	0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+	0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
+	0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f,
+	0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x69, 0x0a, 0x0b, 0x4d, 0x65,
+	0x74, 0x72, 0x69, 0x63, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x73,
+	0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+	0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63,
+	0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74,
+	0x72, 0x69, 0x63, 0x73, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65,
+	0x74, 0x72, 0x69, 0x63, 0x73, 0x22, 0xd2, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
+	0x63, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x45, 0x0a, 0x08, 0x72, 0x65, 0x73,
+	0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70,
+	0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65,
+	0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+	0x12, 0x51, 0x0a, 0x0d, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63,
+	0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+	0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65,
+	0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4d, 0x65,
+	0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x0c, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x4d, 0x65, 0x74, 0x72,
+	0x69, 0x63, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72,
+	0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55,
+	0x72, 0x6c, 0x4a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9, 0x07, 0x22, 0xba, 0x01, 0x0a, 0x0c, 0x53,
+	0x63, 0x6f, 0x70, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x49, 0x0a, 0x05, 0x73,
+	0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x6f, 0x70, 0x65,
+	0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x72,
+	0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x52,
+	0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63,
+	0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+	0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65,
+	0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52,
+	0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65,
+	0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63,
+	0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x22, 0xe1, 0x03, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72,
+	0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73,
+	0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74,
+	0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x12, 0x3d, 0x0a, 0x05,
+	0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x70,
+	0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x61, 0x75,
+	0x67, 0x65, 0x48, 0x00, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x03, 0x73,
+	0x75, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74,
+	0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d,
+	0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x6d, 0x48, 0x00, 0x52,
+	0x03, 0x73, 0x75, 0x6d, 0x12, 0x49, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
+	0x6d, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+	0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65,
+	0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72,
+	0x61, 0x6d, 0x48, 0x00, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12,
+	0x6b, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x68,
+	0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34,
+	0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e,
+	0x45, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x48, 0x69, 0x73, 0x74, 0x6f,
+	0x67, 0x72, 0x61, 0x6d, 0x48, 0x00, 0x52, 0x14, 0x65, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74,
+	0x69, 0x61, 0x6c, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x43, 0x0a, 0x07,
+	0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e,
+	0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53,
+	0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x48, 0x00, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72,
+	0x79, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a,
+	0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0x59, 0x0a, 0x05, 0x47,
+	0x61, 0x75, 0x67, 0x65, 0x12, 0x50, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x69,
+	0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+	0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+	0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x75, 0x6d, 0x62, 0x65,
+	0x72, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61,
+	0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x22, 0xeb, 0x01, 0x0a, 0x03, 0x53, 0x75, 0x6d, 0x12, 0x50,
+	0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+	0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63,
+	0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x50,
+	0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73,
+	0x12, 0x6f, 0x0a, 0x17, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+	0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x0e, 0x32, 0x36, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72,
+	0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e,
+	0x76, 0x31, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65,
+	0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x16, 0x61, 0x67, 0x67, 0x72, 0x65,
+	0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74,
+	0x79, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x6d, 0x6f, 0x6e, 0x6f, 0x74, 0x6f, 0x6e, 0x69,
+	0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x4d, 0x6f, 0x6e, 0x6f, 0x74,
+	0x6f, 0x6e, 0x69, 0x63, 0x22, 0xd1, 0x01, 0x0a, 0x09, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72,
+	0x61, 0x6d, 0x12, 0x53, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74,
+	0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+	0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65,
+	0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72,
+	0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0a, 0x64, 0x61, 0x74,
+	0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x6f, 0x0a, 0x17, 0x61, 0x67, 0x67, 0x72, 0x65,
+	0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69,
+	0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74,
+	0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d,
+	0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74, 0x79,
+	0x52, 0x16, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x6d,
+	0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x22, 0xe7, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x70,
+	0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
+	0x6d, 0x12, 0x5e, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73,
+	0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c,
+	0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74,
+	0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74,
+	0x69, 0x61, 0x6c, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61,
+	0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74,
+	0x73, 0x12, 0x6f, 0x0a, 0x17, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+	0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01,
+	0x28, 0x0e, 0x32, 0x36, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+	0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
+	0x2e, 0x76, 0x31, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54,
+	0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x16, 0x61, 0x67, 0x67, 0x72,
+	0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69,
+	0x74, 0x79, 0x22, 0x5c, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x51, 0x0a,
+	0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03,
+	0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+	0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
+	0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x50,
+	0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73,
+	0x22, 0xd6, 0x02, 0x0a, 0x0f, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x50,
+	0x6f, 0x69, 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
+	0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74,
+	0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63,
+	0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75,
+	0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2f, 0x0a,
+	0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78,
+	0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x06, 0x52, 0x11, 0x73, 0x74, 0x61,
+	0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x24,
+	0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f,
+	0x18, 0x03, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78,
+	0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x1d, 0x0a, 0x09, 0x61, 0x73, 0x5f, 0x64, 0x6f, 0x75, 0x62, 0x6c,
+	0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x08, 0x61, 0x73, 0x44, 0x6f, 0x75,
+	0x62, 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x06, 0x61, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20,
+	0x01, 0x28, 0x10, 0x48, 0x00, 0x52, 0x05, 0x61, 0x73, 0x49, 0x6e, 0x74, 0x12, 0x46, 0x0a, 0x09,
+	0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32,
+	0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31,
+	0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78, 0x65, 0x6d, 0x70,
+	0x6c, 0x61, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x08, 0x20,
+	0x01, 0x28, 0x0d, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61,
+	0x6c, 0x75, 0x65, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xd9, 0x03, 0x0a, 0x12, 0x48, 0x69,
+	0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74,
+	0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x09,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d,
+	0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f,
+	0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61,
+	0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61,
+	0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e,
+	0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x06, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69,
+	0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69,
+	0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x03, 0x20, 0x01,
+	0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f,
+	0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x06, 0x52,
+	0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x15, 0x0a, 0x03, 0x73, 0x75, 0x6d, 0x18, 0x05, 0x20,
+	0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x03, 0x73, 0x75, 0x6d, 0x88, 0x01, 0x01, 0x12, 0x23, 0x0a,
+	0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x06,
+	0x20, 0x03, 0x28, 0x06, 0x52, 0x0c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6f, 0x75, 0x6e,
+	0x74, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x62,
+	0x6f, 0x75, 0x6e, 0x64, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0e, 0x65, 0x78, 0x70,
+	0x6c, 0x69, 0x63, 0x69, 0x74, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x73, 0x12, 0x46, 0x0a, 0x09, 0x65,
+	0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28,
+	0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e,
+	0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c,
+	0x61, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x0a, 0x20, 0x01,
+	0x28, 0x0d, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x15, 0x0a, 0x03, 0x6d, 0x69, 0x6e,
+	0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x48, 0x01, 0x52, 0x03, 0x6d, 0x69, 0x6e, 0x88, 0x01, 0x01,
+	0x12, 0x15, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x01, 0x48, 0x02, 0x52,
+	0x03, 0x6d, 0x61, 0x78, 0x88, 0x01, 0x01, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x73, 0x75, 0x6d, 0x42,
+	0x06, 0x0a, 0x04, 0x5f, 0x6d, 0x69, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x6d, 0x61, 0x78, 0x4a,
+	0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xfa, 0x05, 0x0a, 0x1d, 0x45, 0x78, 0x70, 0x6f, 0x6e, 0x65,
+	0x6e, 0x74, 0x69, 0x61, 0x6c, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x44, 0x61,
+	0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69,
+	0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70,
+	0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56,
+	0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
+	0x12, 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75,
+	0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x06, 0x52, 0x11,
+	0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e,
+	0x6f, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e,
+	0x61, 0x6e, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55,
+	0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+	0x18, 0x04, 0x20, 0x01, 0x28, 0x06, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x15, 0x0a,
+	0x03, 0x73, 0x75, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x03, 0x73, 0x75,
+	0x6d, 0x88, 0x01, 0x01, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x06, 0x20,
+	0x01, 0x28, 0x11, 0x52, 0x05, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65,
+	0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x06, 0x52, 0x09,
+	0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x61, 0x0a, 0x08, 0x70, 0x6f, 0x73,
+	0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x6f, 0x70,
+	0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70,
+	0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
+	0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65,
+	0x74, 0x73, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x61, 0x0a, 0x08,
+	0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45,
+	0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e,
+	0x45, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x48, 0x69, 0x73, 0x74, 0x6f,
+	0x67, 0x72, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x42, 0x75,
+	0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x08, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x12,
+	0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05,
+	0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x46, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61,
+	0x72, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74,
+	0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d,
+	0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c,
+	0x61, 0x72, 0x52, 0x09, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x12, 0x15, 0x0a,
+	0x03, 0x6d, 0x69, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x01, 0x48, 0x01, 0x52, 0x03, 0x6d, 0x69,
+	0x6e, 0x88, 0x01, 0x01, 0x12, 0x15, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x0d, 0x20, 0x01, 0x28,
+	0x01, 0x48, 0x02, 0x52, 0x03, 0x6d, 0x61, 0x78, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x7a,
+	0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20,
+	0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f,
+	0x6c, 0x64, 0x1a, 0x46, 0x0a, 0x07, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x16, 0x0a,
+	0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x6f,
+	0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f,
+	0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x04, 0x52, 0x0c, 0x62, 0x75,
+	0x63, 0x6b, 0x65, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x73,
+	0x75, 0x6d, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x6d, 0x69, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x6d,
+	0x61, 0x78, 0x22, 0xa6, 0x03, 0x0a, 0x10, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x44, 0x61,
+	0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69,
+	0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70,
+	0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56,
+	0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
+	0x12, 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75,
+	0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x06, 0x52, 0x11,
+	0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e,
+	0x6f, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e,
+	0x61, 0x6e, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55,
+	0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+	0x18, 0x04, 0x20, 0x01, 0x28, 0x06, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x10, 0x0a,
+	0x03, 0x73, 0x75, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x73, 0x75, 0x6d, 0x12,
+	0x69, 0x0a, 0x0f, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74,
+	0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d,
+	0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72,
+	0x79, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65,
+	0x41, 0x74, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x0e, 0x71, 0x75, 0x61, 0x6e,
+	0x74, 0x69, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c,
+	0x61, 0x67, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73,
+	0x1a, 0x43, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41, 0x74, 0x51, 0x75, 0x61, 0x6e, 0x74,
+	0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12,
+	0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x85, 0x02, 0x0a, 0x08,
+	0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x58, 0x0a, 0x13, 0x66, 0x69, 0x6c, 0x74,
+	0x65, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18,
+	0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65,
+	0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d,
+	0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12,
+	0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
+	0x65, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f,
+	0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65,
+	0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x1d, 0x0a, 0x09, 0x61, 0x73, 0x5f, 0x64,
+	0x6f, 0x75, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x08, 0x61,
+	0x73, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x06, 0x61, 0x73, 0x5f, 0x69, 0x6e,
+	0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x10, 0x48, 0x00, 0x52, 0x05, 0x61, 0x73, 0x49, 0x6e, 0x74,
+	0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28,
+	0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61,
+	0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61,
+	0x63, 0x65, 0x49, 0x64, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4a, 0x04, 0x08,
+	0x01, 0x10, 0x02, 0x2a, 0x8c, 0x01, 0x0a, 0x16, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74,
+	0x69, 0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x27,
+	0x0a, 0x23, 0x41, 0x47, 0x47, 0x52, 0x45, 0x47, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x45,
+	0x4d, 0x50, 0x4f, 0x52, 0x41, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
+	0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x21, 0x0a, 0x1d, 0x41, 0x47, 0x47, 0x52, 0x45,
+	0x47, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x45, 0x4d, 0x50, 0x4f, 0x52, 0x41, 0x4c, 0x49,
+	0x54, 0x59, 0x5f, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x10, 0x01, 0x12, 0x26, 0x0a, 0x22, 0x41, 0x47,
+	0x47, 0x52, 0x45, 0x47, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x45, 0x4d, 0x50, 0x4f, 0x52,
+	0x41, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x43, 0x55, 0x4d, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x56, 0x45,
+	0x10, 0x02, 0x2a, 0x5e, 0x0a, 0x0e, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x46,
+	0x6c, 0x61, 0x67, 0x73, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x50, 0x4f, 0x49,
+	0x4e, 0x54, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x44, 0x4f, 0x5f, 0x4e, 0x4f, 0x54, 0x5f,
+	0x55, 0x53, 0x45, 0x10, 0x00, 0x12, 0x2b, 0x0a, 0x27, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x50, 0x4f,
+	0x49, 0x4e, 0x54, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x4e, 0x4f, 0x5f, 0x52, 0x45, 0x43,
+	0x4f, 0x52, 0x44, 0x45, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4d, 0x41, 0x53, 0x4b,
+	0x10, 0x01, 0x42, 0x7f, 0x0a, 0x21, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c,
+	0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74,
+	0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
+	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+	0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f,
+	0x76, 0x31, 0xaa, 0x02, 0x1e, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+	0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
+	0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescOnce sync.Once
+	file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescData = file_opentelemetry_proto_metrics_v1_metrics_proto_rawDesc
+)
+
+func file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP() []byte {
+	file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescOnce.Do(func() {
+		file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescData)
+	})
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescData
+}
+
+var file_opentelemetry_proto_metrics_v1_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
+var file_opentelemetry_proto_metrics_v1_metrics_proto_goTypes = []interface{}{
+	(AggregationTemporality)(0),                   // 0: opentelemetry.proto.metrics.v1.AggregationTemporality
+	(DataPointFlags)(0),                           // 1: opentelemetry.proto.metrics.v1.DataPointFlags
+	(*MetricsData)(nil),                           // 2: opentelemetry.proto.metrics.v1.MetricsData
+	(*ResourceMetrics)(nil),                       // 3: opentelemetry.proto.metrics.v1.ResourceMetrics
+	(*ScopeMetrics)(nil),                          // 4: opentelemetry.proto.metrics.v1.ScopeMetrics
+	(*Metric)(nil),                                // 5: opentelemetry.proto.metrics.v1.Metric
+	(*Gauge)(nil),                                 // 6: opentelemetry.proto.metrics.v1.Gauge
+	(*Sum)(nil),                                   // 7: opentelemetry.proto.metrics.v1.Sum
+	(*Histogram)(nil),                             // 8: opentelemetry.proto.metrics.v1.Histogram
+	(*ExponentialHistogram)(nil),                  // 9: opentelemetry.proto.metrics.v1.ExponentialHistogram
+	(*Summary)(nil),                               // 10: opentelemetry.proto.metrics.v1.Summary
+	(*NumberDataPoint)(nil),                       // 11: opentelemetry.proto.metrics.v1.NumberDataPoint
+	(*HistogramDataPoint)(nil),                    // 12: opentelemetry.proto.metrics.v1.HistogramDataPoint
+	(*ExponentialHistogramDataPoint)(nil),         // 13: opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint
+	(*SummaryDataPoint)(nil),                      // 14: opentelemetry.proto.metrics.v1.SummaryDataPoint
+	(*Exemplar)(nil),                              // 15: opentelemetry.proto.metrics.v1.Exemplar
+	(*ExponentialHistogramDataPoint_Buckets)(nil), // 16: opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets
+	(*SummaryDataPoint_ValueAtQuantile)(nil),      // 17: opentelemetry.proto.metrics.v1.SummaryDataPoint.ValueAtQuantile
+	(*v1.Resource)(nil),                           // 18: opentelemetry.proto.resource.v1.Resource
+	(*v11.InstrumentationScope)(nil),              // 19: opentelemetry.proto.common.v1.InstrumentationScope
+	(*v11.KeyValue)(nil),                          // 20: opentelemetry.proto.common.v1.KeyValue
+}
+var file_opentelemetry_proto_metrics_v1_metrics_proto_depIdxs = []int32{
+	3,  // 0: opentelemetry.proto.metrics.v1.MetricsData.resource_metrics:type_name -> opentelemetry.proto.metrics.v1.ResourceMetrics
+	18, // 1: opentelemetry.proto.metrics.v1.ResourceMetrics.resource:type_name -> opentelemetry.proto.resource.v1.Resource
+	4,  // 2: opentelemetry.proto.metrics.v1.ResourceMetrics.scope_metrics:type_name -> opentelemetry.proto.metrics.v1.ScopeMetrics
+	19, // 3: opentelemetry.proto.metrics.v1.ScopeMetrics.scope:type_name -> opentelemetry.proto.common.v1.InstrumentationScope
+	5,  // 4: opentelemetry.proto.metrics.v1.ScopeMetrics.metrics:type_name -> opentelemetry.proto.metrics.v1.Metric
+	6,  // 5: opentelemetry.proto.metrics.v1.Metric.gauge:type_name -> opentelemetry.proto.metrics.v1.Gauge
+	7,  // 6: opentelemetry.proto.metrics.v1.Metric.sum:type_name -> opentelemetry.proto.metrics.v1.Sum
+	8,  // 7: opentelemetry.proto.metrics.v1.Metric.histogram:type_name -> opentelemetry.proto.metrics.v1.Histogram
+	9,  // 8: opentelemetry.proto.metrics.v1.Metric.exponential_histogram:type_name -> opentelemetry.proto.metrics.v1.ExponentialHistogram
+	10, // 9: opentelemetry.proto.metrics.v1.Metric.summary:type_name -> opentelemetry.proto.metrics.v1.Summary
+	11, // 10: opentelemetry.proto.metrics.v1.Gauge.data_points:type_name -> opentelemetry.proto.metrics.v1.NumberDataPoint
+	11, // 11: opentelemetry.proto.metrics.v1.Sum.data_points:type_name -> opentelemetry.proto.metrics.v1.NumberDataPoint
+	0,  // 12: opentelemetry.proto.metrics.v1.Sum.aggregation_temporality:type_name -> opentelemetry.proto.metrics.v1.AggregationTemporality
+	12, // 13: opentelemetry.proto.metrics.v1.Histogram.data_points:type_name -> opentelemetry.proto.metrics.v1.HistogramDataPoint
+	0,  // 14: opentelemetry.proto.metrics.v1.Histogram.aggregation_temporality:type_name -> opentelemetry.proto.metrics.v1.AggregationTemporality
+	13, // 15: opentelemetry.proto.metrics.v1.ExponentialHistogram.data_points:type_name -> opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint
+	0,  // 16: opentelemetry.proto.metrics.v1.ExponentialHistogram.aggregation_temporality:type_name -> opentelemetry.proto.metrics.v1.AggregationTemporality
+	14, // 17: opentelemetry.proto.metrics.v1.Summary.data_points:type_name -> opentelemetry.proto.metrics.v1.SummaryDataPoint
+	20, // 18: opentelemetry.proto.metrics.v1.NumberDataPoint.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+	15, // 19: opentelemetry.proto.metrics.v1.NumberDataPoint.exemplars:type_name -> opentelemetry.proto.metrics.v1.Exemplar
+	20, // 20: opentelemetry.proto.metrics.v1.HistogramDataPoint.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+	15, // 21: opentelemetry.proto.metrics.v1.HistogramDataPoint.exemplars:type_name -> opentelemetry.proto.metrics.v1.Exemplar
+	20, // 22: opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+	16, // 23: opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.positive:type_name -> opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets
+	16, // 24: opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.negative:type_name -> opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets
+	15, // 25: opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.exemplars:type_name -> opentelemetry.proto.metrics.v1.Exemplar
+	20, // 26: opentelemetry.proto.metrics.v1.SummaryDataPoint.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+	17, // 27: opentelemetry.proto.metrics.v1.SummaryDataPoint.quantile_values:type_name -> opentelemetry.proto.metrics.v1.SummaryDataPoint.ValueAtQuantile
+	20, // 28: opentelemetry.proto.metrics.v1.Exemplar.filtered_attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+	29, // [29:29] is the sub-list for method output_type
+	29, // [29:29] is the sub-list for method input_type
+	29, // [29:29] is the sub-list for extension type_name
+	29, // [29:29] is the sub-list for extension extendee
+	0,  // [0:29] is the sub-list for field type_name
+}
+
+func init() { file_opentelemetry_proto_metrics_v1_metrics_proto_init() }
+func file_opentelemetry_proto_metrics_v1_metrics_proto_init() {
+	if File_opentelemetry_proto_metrics_v1_metrics_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*MetricsData); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ResourceMetrics); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ScopeMetrics); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Metric); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Gauge); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Sum); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Histogram); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ExponentialHistogram); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Summary); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*NumberDataPoint); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*HistogramDataPoint); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ExponentialHistogramDataPoint); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*SummaryDataPoint); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Exemplar); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ExponentialHistogramDataPoint_Buckets); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*SummaryDataPoint_ValueAtQuantile); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[3].OneofWrappers = []interface{}{
+		(*Metric_Gauge)(nil),
+		(*Metric_Sum)(nil),
+		(*Metric_Histogram)(nil),
+		(*Metric_ExponentialHistogram)(nil),
+		(*Metric_Summary)(nil),
+	}
+	file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[9].OneofWrappers = []interface{}{
+		(*NumberDataPoint_AsDouble)(nil),
+		(*NumberDataPoint_AsInt)(nil),
+	}
+	file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[10].OneofWrappers = []interface{}{}
+	file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[11].OneofWrappers = []interface{}{}
+	file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[13].OneofWrappers = []interface{}{
+		(*Exemplar_AsDouble)(nil),
+		(*Exemplar_AsInt)(nil),
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_opentelemetry_proto_metrics_v1_metrics_proto_rawDesc,
+			NumEnums:      2,
+			NumMessages:   16,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_opentelemetry_proto_metrics_v1_metrics_proto_goTypes,
+		DependencyIndexes: file_opentelemetry_proto_metrics_v1_metrics_proto_depIdxs,
+		EnumInfos:         file_opentelemetry_proto_metrics_v1_metrics_proto_enumTypes,
+		MessageInfos:      file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes,
+	}.Build()
+	File_opentelemetry_proto_metrics_v1_metrics_proto = out.File
+	file_opentelemetry_proto_metrics_v1_metrics_proto_rawDesc = nil
+	file_opentelemetry_proto_metrics_v1_metrics_proto_goTypes = nil
+	file_opentelemetry_proto_metrics_v1_metrics_proto_depIdxs = nil
+}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..b7545b03b9fe9424e6b3c6519b715b77f9f66739
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go
@@ -0,0 +1,193 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v3.21.6
+// source: opentelemetry/proto/resource/v1/resource.proto
+
+package v1
+
+import (
+	v1 "go.opentelemetry.io/proto/otlp/common/v1"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Resource information.
+type Resource struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Set of attributes that describe the resource.
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attributes []*v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	// dropped_attributes_count is the number of dropped attributes. If the value is 0, then
+	// no attributes were dropped.
+	DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+}
+
+func (x *Resource) Reset() {
+	*x = Resource{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Resource) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Resource) ProtoMessage() {}
+
+func (x *Resource) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Resource.ProtoReflect.Descriptor instead.
+func (*Resource) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_resource_v1_resource_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Resource) GetAttributes() []*v1.KeyValue {
+	if x != nil {
+		return x.Attributes
+	}
+	return nil
+}
+
+func (x *Resource) GetDroppedAttributesCount() uint32 {
+	if x != nil {
+		return x.DroppedAttributesCount
+	}
+	return 0
+}
+
+var File_opentelemetry_proto_resource_v1_resource_proto protoreflect.FileDescriptor
+
+var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{
+	0x0a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76,
+	0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x12, 0x1f, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76,
+	0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79,
+	0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31,
+	0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x01,
+	0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74,
+	0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27,
+	0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b,
+	0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75,
+	0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61,
+	0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74,
+	0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x83, 0x01,
+	0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+	0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+	0x65, 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72,
+	0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+	0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76,
+	0x31, 0xaa, 0x02, 0x1f, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72,
+	0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+	0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_opentelemetry_proto_resource_v1_resource_proto_rawDescOnce sync.Once
+	file_opentelemetry_proto_resource_v1_resource_proto_rawDescData = file_opentelemetry_proto_resource_v1_resource_proto_rawDesc
+)
+
+func file_opentelemetry_proto_resource_v1_resource_proto_rawDescGZIP() []byte {
+	file_opentelemetry_proto_resource_v1_resource_proto_rawDescOnce.Do(func() {
+		file_opentelemetry_proto_resource_v1_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_resource_v1_resource_proto_rawDescData)
+	})
+	return file_opentelemetry_proto_resource_v1_resource_proto_rawDescData
+}
+
+var file_opentelemetry_proto_resource_v1_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_opentelemetry_proto_resource_v1_resource_proto_goTypes = []interface{}{
+	(*Resource)(nil),    // 0: opentelemetry.proto.resource.v1.Resource
+	(*v1.KeyValue)(nil), // 1: opentelemetry.proto.common.v1.KeyValue
+}
+var file_opentelemetry_proto_resource_v1_resource_proto_depIdxs = []int32{
+	1, // 0: opentelemetry.proto.resource.v1.Resource.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+	1, // [1:1] is the sub-list for method output_type
+	1, // [1:1] is the sub-list for method input_type
+	1, // [1:1] is the sub-list for extension type_name
+	1, // [1:1] is the sub-list for extension extendee
+	0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_opentelemetry_proto_resource_v1_resource_proto_init() }
+func file_opentelemetry_proto_resource_v1_resource_proto_init() {
+	if File_opentelemetry_proto_resource_v1_resource_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Resource); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_opentelemetry_proto_resource_v1_resource_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   1,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_opentelemetry_proto_resource_v1_resource_proto_goTypes,
+		DependencyIndexes: file_opentelemetry_proto_resource_v1_resource_proto_depIdxs,
+		MessageInfos:      file_opentelemetry_proto_resource_v1_resource_proto_msgTypes,
+	}.Build()
+	File_opentelemetry_proto_resource_v1_resource_proto = out.File
+	file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = nil
+	file_opentelemetry_proto_resource_v1_resource_proto_goTypes = nil
+	file_opentelemetry_proto_resource_v1_resource_proto_depIdxs = nil
+}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..7109088370545165cb07b1079da0ea615b7e7774
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
@@ -0,0 +1,1251 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v3.21.6
+// source: opentelemetry/proto/trace/v1/trace.proto
+
+package v1
+
+import (
+	v11 "go.opentelemetry.io/proto/otlp/common/v1"
+	v1 "go.opentelemetry.io/proto/otlp/resource/v1"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// SpanFlags represents constants used to interpret the
+// Span.flags field, which is a protobuf 'fixed32' type and is to
+// be used as a bit field. Each non-zero value defined in this enum is
+// a bit mask.  To extract a bit field, for example, use an
+// expression like:
+//
+//   (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
+//
+// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+//
+// Note that Span flags were introduced in version 1.1 of the
+// OpenTelemetry protocol.  Older Span producers do not set this
+// field; consequently, consumers should not rely on the absence of a
+// particular flag bit to indicate the presence of a particular feature.
+type SpanFlags int32
+
+const (
+	// The zero value for the enum. It should not be used for comparisons;
+	// instead, use bitwise "and" with the appropriate mask as shown above.
+	SpanFlags_SPAN_FLAGS_DO_NOT_USE SpanFlags = 0
+	// Bits 0-7 are used for trace flags.
+	SpanFlags_SPAN_FLAGS_TRACE_FLAGS_MASK SpanFlags = 255
+)
+
+// Enum value maps for SpanFlags.
+var (
+	SpanFlags_name = map[int32]string{
+		0:   "SPAN_FLAGS_DO_NOT_USE",
+		255: "SPAN_FLAGS_TRACE_FLAGS_MASK",
+	}
+	SpanFlags_value = map[string]int32{
+		"SPAN_FLAGS_DO_NOT_USE":       0,
+		"SPAN_FLAGS_TRACE_FLAGS_MASK": 255,
+	}
+)
+
+func (x SpanFlags) Enum() *SpanFlags {
+	p := new(SpanFlags)
+	*p = x
+	return p
+}
+
+func (x SpanFlags) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SpanFlags) Descriptor() protoreflect.EnumDescriptor {
+	return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0].Descriptor()
+}
+
+func (SpanFlags) Type() protoreflect.EnumType {
+	return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0]
+}
+
+func (x SpanFlags) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SpanFlags.Descriptor instead.
+func (SpanFlags) EnumDescriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0}
+}
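+
+// A minimal usage sketch (an illustration, not part of the generated API),
+// assuming a populated *Span value named s is in scope: the W3C trace flags
+// occupy the low 8 bits of the Flags field and can be read by masking with
+// the constant defined above, e.g.
+//
+//	traceFlags := s.GetFlags() & uint32(SpanFlags_SPAN_FLAGS_TRACE_FLAGS_MASK)
+//	sampled := traceFlags&0x01 != 0 // bit 0 is the W3C "sampled" flag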
+
+// SpanKind is the type of span. It can be used to specify additional relationships
+// between spans beyond the parent/child relationship.
+type Span_SpanKind int32
+
+const (
+	// Unspecified. Do NOT use as default.
+	// Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED.
+	Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0
+	// Indicates that the span represents an internal operation within an application,
+	// as opposed to an operation happening at the boundaries. Default value.
+	Span_SPAN_KIND_INTERNAL Span_SpanKind = 1
+	// Indicates that the span covers server-side handling of an RPC or other
+	// remote network request.
+	Span_SPAN_KIND_SERVER Span_SpanKind = 2
+	// Indicates that the span describes a request to some remote service.
+	Span_SPAN_KIND_CLIENT Span_SpanKind = 3
+	// Indicates that the span describes a producer sending a message to a broker.
+	// Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
+	// between producer and consumer spans. A PRODUCER span ends when the message is accepted
+	// by the broker, while the logical processing of the message may span a much longer time.
+	Span_SPAN_KIND_PRODUCER Span_SpanKind = 4
+	// Indicates that the span describes a consumer receiving a message from a broker.
+	// Like the PRODUCER kind, there is often no direct critical path latency relationship
+	// between producer and consumer spans.
+	Span_SPAN_KIND_CONSUMER Span_SpanKind = 5
+)
+
+// Enum value maps for Span_SpanKind.
+var (
+	Span_SpanKind_name = map[int32]string{
+		0: "SPAN_KIND_UNSPECIFIED",
+		1: "SPAN_KIND_INTERNAL",
+		2: "SPAN_KIND_SERVER",
+		3: "SPAN_KIND_CLIENT",
+		4: "SPAN_KIND_PRODUCER",
+		5: "SPAN_KIND_CONSUMER",
+	}
+	Span_SpanKind_value = map[string]int32{
+		"SPAN_KIND_UNSPECIFIED": 0,
+		"SPAN_KIND_INTERNAL":    1,
+		"SPAN_KIND_SERVER":      2,
+		"SPAN_KIND_CLIENT":      3,
+		"SPAN_KIND_PRODUCER":    4,
+		"SPAN_KIND_CONSUMER":    5,
+	}
+)
+
+func (x Span_SpanKind) Enum() *Span_SpanKind {
+	p := new(Span_SpanKind)
+	*p = x
+	return p
+}
+
+func (x Span_SpanKind) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Span_SpanKind) Descriptor() protoreflect.EnumDescriptor {
+	return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1].Descriptor()
+}
+
+func (Span_SpanKind) Type() protoreflect.EnumType {
+	return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1]
+}
+
+func (x Span_SpanKind) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Span_SpanKind.Descriptor instead.
+func (Span_SpanKind) EnumDescriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 0}
+}
+
+// For the semantics of status codes see
+// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
+type Status_StatusCode int32
+
+const (
+	// The default status.
+	Status_STATUS_CODE_UNSET Status_StatusCode = 0
+	// The Span has been validated by an application developer or operator to
+	// have completed successfully.
+	Status_STATUS_CODE_OK Status_StatusCode = 1
+	// The Span contains an error.
+	Status_STATUS_CODE_ERROR Status_StatusCode = 2
+)
+
+// Enum value maps for Status_StatusCode.
+var (
+	Status_StatusCode_name = map[int32]string{
+		0: "STATUS_CODE_UNSET",
+		1: "STATUS_CODE_OK",
+		2: "STATUS_CODE_ERROR",
+	}
+	Status_StatusCode_value = map[string]int32{
+		"STATUS_CODE_UNSET": 0,
+		"STATUS_CODE_OK":    1,
+		"STATUS_CODE_ERROR": 2,
+	}
+)
+
+func (x Status_StatusCode) Enum() *Status_StatusCode {
+	p := new(Status_StatusCode)
+	*p = x
+	return p
+}
+
+func (x Status_StatusCode) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Status_StatusCode) Descriptor() protoreflect.EnumDescriptor {
+	return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[2].Descriptor()
+}
+
+func (Status_StatusCode) Type() protoreflect.EnumType {
+	return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[2]
+}
+
+func (x Status_StatusCode) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Status_StatusCode.Descriptor instead.
+func (Status_StatusCode) EnumDescriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4, 0}
+}
+
+// TracesData represents the traces data that can be stored in a persistent storage,
+// OR can be embedded by other protocols that transfer OTLP traces data but do
+// not implement the OTLP protocol.
+//
+// The main difference between this message and the collector protocol is that
+// in this message there will not be any "control" or "metadata" specific to
+// the OTLP protocol.
+//
+// When new fields are added into this message, the OTLP request MUST be updated
+// as well.
+type TracesData struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// An array of ResourceSpans.
+	// For data coming from a single resource, this array will typically contain
+	// one element. Intermediary nodes that receive data from multiple origins
+	// typically batch the data before forwarding it further, and in that case this
+	// array will contain multiple elements.
+	ResourceSpans []*ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"`
+}
+
+func (x *TracesData) Reset() {
+	*x = TracesData{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *TracesData) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TracesData) ProtoMessage() {}
+
+func (x *TracesData) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use TracesData.ProtoReflect.Descriptor instead.
+func (*TracesData) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TracesData) GetResourceSpans() []*ResourceSpans {
+	if x != nil {
+		return x.ResourceSpans
+	}
+	return nil
+}
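+
+// A minimal construction sketch (an illustration, not part of the generated
+// API), assuming 16-byte traceID and 8-byte spanID byte slices are in scope;
+// it shows the TracesData -> ResourceSpans -> ScopeSpans -> Span nesting
+// described above:
+//
+//	td := &TracesData{
+//		ResourceSpans: []*ResourceSpans{{
+//			ScopeSpans: []*ScopeSpans{{
+//				Spans: []*Span{{Name: "example", TraceId: traceID, SpanId: spanID}},
+//			}},
+//		}},
+//	}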
+
+// A collection of ScopeSpans from a Resource.
+type ResourceSpans struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The resource for the spans in this message.
+	// If this field is not set then no resource info is known.
+	Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
+	// A list of ScopeSpans that originate from a resource.
+	ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"`
+	// The Schema URL, if known. This is the identifier of the Schema that the resource data
+	// is recorded in. To learn more about Schema URL see
+	// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+	// This schema_url applies to the data in the "resource" field. It does not apply
+	// to the data in the "scope_spans" field, whose entries have their own schema_url field.
+	SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
+}
+
+func (x *ResourceSpans) Reset() {
+	*x = ResourceSpans{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ResourceSpans) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceSpans) ProtoMessage() {}
+
+func (x *ResourceSpans) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceSpans.ProtoReflect.Descriptor instead.
+func (*ResourceSpans) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ResourceSpans) GetResource() *v1.Resource {
+	if x != nil {
+		return x.Resource
+	}
+	return nil
+}
+
+func (x *ResourceSpans) GetScopeSpans() []*ScopeSpans {
+	if x != nil {
+		return x.ScopeSpans
+	}
+	return nil
+}
+
+func (x *ResourceSpans) GetSchemaUrl() string {
+	if x != nil {
+		return x.SchemaUrl
+	}
+	return ""
+}
+
+// A collection of Spans produced by an InstrumentationScope.
+type ScopeSpans struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The instrumentation scope information for the spans in this message.
+	// Semantically, when InstrumentationScope isn't set, it is equivalent to
+	// an empty instrumentation scope name (unknown).
+	Scope *v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"`
+	// A list of Spans that originate from an instrumentation scope.
+	Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"`
+	// The Schema URL, if known. This is the identifier of the Schema that the span data
+	// is recorded in. To learn more about Schema URL see
+	// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+	// This schema_url applies to all spans and span events in the "spans" field.
+	SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
+}
+
+func (x *ScopeSpans) Reset() {
+	*x = ScopeSpans{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ScopeSpans) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ScopeSpans) ProtoMessage() {}
+
+func (x *ScopeSpans) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ScopeSpans.ProtoReflect.Descriptor instead.
+func (*ScopeSpans) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ScopeSpans) GetScope() *v11.InstrumentationScope {
+	if x != nil {
+		return x.Scope
+	}
+	return nil
+}
+
+func (x *ScopeSpans) GetSpans() []*Span {
+	if x != nil {
+		return x.Spans
+	}
+	return nil
+}
+
+func (x *ScopeSpans) GetSchemaUrl() string {
+	if x != nil {
+		return x.SchemaUrl
+	}
+	return ""
+}
+
+// A Span represents a single operation performed by a single component of the system.
+//
+// The next available field id is 17.
+type Span struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A unique identifier for a trace. All spans from the same trace share
+	// the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
+	// of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
+	// is zero-length and thus is also invalid).
+	//
+	// This field is required.
+	TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
+	// A unique identifier for a span within a trace, assigned when the span
+	// is created. The ID is an 8-byte array. An ID with all zeroes OR of length
+	// other than 8 bytes is considered invalid (empty string in OTLP/JSON
+	// is zero-length and thus is also invalid).
+	//
+	// This field is required.
+	SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
+	// trace_state conveys information about request position in multiple distributed tracing graphs.
+	// It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
+	// See also https://github.com/w3c/distributed-tracing for more details about this field.
+	TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"`
+	// The `span_id` of this span's parent span. If this is a root span, then this
+	// field must be empty. The ID is an 8-byte array.
+	ParentSpanId []byte `protobuf:"bytes,4,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"`
+	// Flags, a bit field. The 8 least significant bits are the trace
+	// flags as defined in the W3C Trace Context specification. Readers
+	// MUST NOT assume that the 24 most significant bits will be zero.
+	// To read the 8-bit W3C trace flag, use `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+	//
+	// When creating span messages, if the message is logically forwarded from another source
+	// with an equivalent flags field (i.e., usually another OTLP span message), the field SHOULD
+	// be copied as-is. If creating from a source that does not have an equivalent flags field
+	// (such as a runtime representation of an OpenTelemetry span), the high 24 bits MUST
+	// be set to zero.
+	//
+	// [Optional].
+	//
+	// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+	Flags uint32 `protobuf:"fixed32,16,opt,name=flags,proto3" json:"flags,omitempty"`
+	// A description of the span's operation.
+	//
+	// For example, the name can be a qualified method name or a file name
+	// and a line number where the operation is called. A best practice is to use
+	// the same display name at the same call point in an application.
+	// This makes it easier to correlate spans in different traces.
+	//
+	// This field is semantically required to be set to a non-empty string.
+	// An empty value is equivalent to an unknown span name.
+	//
+	// This field is required.
+	Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
+	// Distinguishes between spans generated in a particular context. For example,
+	// two spans with the same name may be distinguished using `CLIENT` (caller)
+	// and `SERVER` (callee) to identify queueing latency associated with the span.
+	Kind Span_SpanKind `protobuf:"varint,6,opt,name=kind,proto3,enum=opentelemetry.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"`
+	// start_time_unix_nano is the start time of the span. On the client side, this is the time
+	// kept by the local machine where the span execution starts. On the server side, this
+	// is the time when the server's application handler starts running.
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+	//
+	// This field is semantically required and it is expected that end_time >= start_time.
+	StartTimeUnixNano uint64 `protobuf:"fixed64,7,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
+	// end_time_unix_nano is the end time of the span. On the client side, this is the time
+	// kept by the local machine where the span execution ends. On the server side, this
+	// is the time when the server application handler stops running.
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+	//
+	// This field is semantically required and it is expected that end_time >= start_time.
+	EndTimeUnixNano uint64 `protobuf:"fixed64,8,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"`
+	// attributes is a collection of key/value pairs. Note, global attributes
+	// like server name can be set using the resource API. Examples of attributes:
+	//
+	//     "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
+	//     "/http/server_latency": 300
+	//     "example.com/myattribute": true
+	//     "example.com/score": 10.239
+	//
+	// The OpenTelemetry API specification further restricts the allowed value types:
+	// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	// dropped_attributes_count is the number of attributes that were discarded. Attributes
+	// can be discarded because their keys are too long or because there are too many
+	// attributes. If this value is 0, then no attributes were dropped.
+	DroppedAttributesCount uint32 `protobuf:"varint,10,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+	// events is a collection of Event items.
+	Events []*Span_Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"`
+	// dropped_events_count is the number of dropped events. If the value is 0, then no
+	// events were dropped.
+	DroppedEventsCount uint32 `protobuf:"varint,12,opt,name=dropped_events_count,json=droppedEventsCount,proto3" json:"dropped_events_count,omitempty"`
+	// links is a collection of Links, which are references from this span to a span
+	// in the same or different trace.
+	Links []*Span_Link `protobuf:"bytes,13,rep,name=links,proto3" json:"links,omitempty"`
+	// dropped_links_count is the number of dropped links after the maximum size was
+	// enforced. If this value is 0, then no links were dropped.
+	DroppedLinksCount uint32 `protobuf:"varint,14,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"`
+	// An optional final status for this span. Semantically, when Status isn't set, it means
+	// the span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0).
+	Status *Status `protobuf:"bytes,15,opt,name=status,proto3" json:"status,omitempty"`
+}
+
+func (x *Span) Reset() {
+	*x = Span{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Span) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Span) ProtoMessage() {}
+
+func (x *Span) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Span.ProtoReflect.Descriptor instead.
+func (*Span) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Span) GetTraceId() []byte {
+	if x != nil {
+		return x.TraceId
+	}
+	return nil
+}
+
+func (x *Span) GetSpanId() []byte {
+	if x != nil {
+		return x.SpanId
+	}
+	return nil
+}
+
+func (x *Span) GetTraceState() string {
+	if x != nil {
+		return x.TraceState
+	}
+	return ""
+}
+
+func (x *Span) GetParentSpanId() []byte {
+	if x != nil {
+		return x.ParentSpanId
+	}
+	return nil
+}
+
+func (x *Span) GetFlags() uint32 {
+	if x != nil {
+		return x.Flags
+	}
+	return 0
+}
+
+func (x *Span) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *Span) GetKind() Span_SpanKind {
+	if x != nil {
+		return x.Kind
+	}
+	return Span_SPAN_KIND_UNSPECIFIED
+}
+
+func (x *Span) GetStartTimeUnixNano() uint64 {
+	if x != nil {
+		return x.StartTimeUnixNano
+	}
+	return 0
+}
+
+func (x *Span) GetEndTimeUnixNano() uint64 {
+	if x != nil {
+		return x.EndTimeUnixNano
+	}
+	return 0
+}
+
+func (x *Span) GetAttributes() []*v11.KeyValue {
+	if x != nil {
+		return x.Attributes
+	}
+	return nil
+}
+
+func (x *Span) GetDroppedAttributesCount() uint32 {
+	if x != nil {
+		return x.DroppedAttributesCount
+	}
+	return 0
+}
+
+func (x *Span) GetEvents() []*Span_Event {
+	if x != nil {
+		return x.Events
+	}
+	return nil
+}
+
+func (x *Span) GetDroppedEventsCount() uint32 {
+	if x != nil {
+		return x.DroppedEventsCount
+	}
+	return 0
+}
+
+func (x *Span) GetLinks() []*Span_Link {
+	if x != nil {
+		return x.Links
+	}
+	return nil
+}
+
+func (x *Span) GetDroppedLinksCount() uint32 {
+	if x != nil {
+		return x.DroppedLinksCount
+	}
+	return 0
+}
+
+func (x *Span) GetStatus() *Status {
+	if x != nil {
+		return x.Status
+	}
+	return nil
+}
+
+// The Status type defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs.
+type Status struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A developer-facing human readable error message.
+	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+	// The status code.
+	Code Status_StatusCode `protobuf:"varint,3,opt,name=code,proto3,enum=opentelemetry.proto.trace.v1.Status_StatusCode" json:"code,omitempty"`
+}
+
+func (x *Status) Reset() {
+	*x = Status{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Status) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Status) ProtoMessage() {}
+
+func (x *Status) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Status.ProtoReflect.Descriptor instead.
+func (*Status) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Status) GetMessage() string {
+	if x != nil {
+		return x.Message
+	}
+	return ""
+}
+
+func (x *Status) GetCode() Status_StatusCode {
+	if x != nil {
+		return x.Code
+	}
+	return Status_STATUS_CODE_UNSET
+}
+
+// Event is a time-stamped annotation of the span, consisting of a user-supplied
+// text description and key-value pairs.
+type Span_Event struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// time_unix_nano is the time the event occurred.
+	TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+	// name of the event.
+	// This field is semantically required to be set to a non-empty string.
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	// attributes is a collection of attribute key/value pairs on the event.
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attributes []*v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	// dropped_attributes_count is the number of dropped attributes. If the value is 0,
+	// then no attributes were dropped.
+	DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+}
+
+func (x *Span_Event) Reset() {
+	*x = Span_Event{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Span_Event) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Span_Event) ProtoMessage() {}
+
+func (x *Span_Event) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Span_Event.ProtoReflect.Descriptor instead.
+func (*Span_Event) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *Span_Event) GetTimeUnixNano() uint64 {
+	if x != nil {
+		return x.TimeUnixNano
+	}
+	return 0
+}
+
+func (x *Span_Event) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *Span_Event) GetAttributes() []*v11.KeyValue {
+	if x != nil {
+		return x.Attributes
+	}
+	return nil
+}
+
+func (x *Span_Event) GetDroppedAttributesCount() uint32 {
+	if x != nil {
+		return x.DroppedAttributesCount
+	}
+	return 0
+}
+
+// A pointer from the current span to another span in the same trace or in a
+// different trace. For example, this can be used in batching operations,
+	// where a single batch handler processes multiple requests from different
+	// traces, or when the handler receives a request from a different project.
+type Span_Link struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A unique identifier of a trace that this linked span is part of. The ID is a
+	// 16-byte array.
+	TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
+	// A unique identifier for the linked span. The ID is an 8-byte array.
+	SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
+	// The trace_state associated with the link.
+	TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"`
+	// attributes is a collection of attribute key/value pairs on the link.
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attributes []*v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	// dropped_attributes_count is the number of dropped attributes. If the value is 0,
+	// then no attributes were dropped.
+	DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+	// Flags, a bit field. The 8 least significant bits are the trace
+	// flags as defined in the W3C Trace Context specification. Readers
+	// MUST NOT assume that the 24 most significant bits will be zero.
+	// When creating new spans, the most significant 24 bits MUST be
+	// zero. To read the 8-bit W3C trace flag, use
+	// `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. [Optional].
+	//
+	// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+	Flags uint32 `protobuf:"fixed32,6,opt,name=flags,proto3" json:"flags,omitempty"`
+}
+
+func (x *Span_Link) Reset() {
+	*x = Span_Link{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Span_Link) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Span_Link) ProtoMessage() {}
+
+func (x *Span_Link) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Span_Link.ProtoReflect.Descriptor instead.
+func (*Span_Link) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 1}
+}
+
+func (x *Span_Link) GetTraceId() []byte {
+	if x != nil {
+		return x.TraceId
+	}
+	return nil
+}
+
+func (x *Span_Link) GetSpanId() []byte {
+	if x != nil {
+		return x.SpanId
+	}
+	return nil
+}
+
+func (x *Span_Link) GetTraceState() string {
+	if x != nil {
+		return x.TraceState
+	}
+	return ""
+}
+
+func (x *Span_Link) GetAttributes() []*v11.KeyValue {
+	if x != nil {
+		return x.Attributes
+	}
+	return nil
+}
+
+func (x *Span_Link) GetDroppedAttributesCount() uint32 {
+	if x != nil {
+		return x.DroppedAttributesCount
+	}
+	return 0
+}
+
+func (x *Span_Link) GetFlags() uint32 {
+	if x != nil {
+		return x.Flags
+	}
+	return 0
+}
+
+var File_opentelemetry_proto_trace_v1_trace_proto protoreflect.FileDescriptor
+
+var file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = []byte{
+	0x0a, 0x28, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74,
+	0x72, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1c, 0x6f, 0x70, 0x65, 0x6e,
+	0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+	0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+	0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f,
+	0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+	0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+	0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x22, 0x60, 0x0a, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x73, 0x44, 0x61,
+	0x74, 0x61, 0x12, 0x52, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73,
+	0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65,
+	0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
+	0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+	0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22, 0xc8, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75,
+	0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f,
+	0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65,
+	0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73,
+	0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12,
+	0x49, 0x0a, 0x0b, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d,
+	0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65,
+	0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0a,
+	0x73, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63,
+	0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+	0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x4a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9,
+	0x07, 0x22, 0xb0, 0x01, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73,
+	0x12, 0x49, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+	0x33, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e,
+	0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53,
+	0x63, 0x6f, 0x70, 0x65, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x73,
+	0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65,
+	0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x05,
+	0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f,
+	0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d,
+	0x61, 0x55, 0x72, 0x6c, 0x22, 0xc8, 0x0a, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x19, 0x0a,
+	0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
+	0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e,
+	0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49,
+	0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65,
+	0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61,
+	0x74, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x70, 0x61,
+	0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x65,
+	0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67,
+	0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x07, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x12,
+	0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+	0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e,
+	0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e,
+	0x53, 0x70, 0x61, 0x6e, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b,
+	0x69, 0x6e, 0x64, 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d,
+	0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28,
+	0x06, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78,
+	0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x2b, 0x0a, 0x12, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+	0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x08, 0x20, 0x01, 0x28, 0x06,
+	0x52, 0x0f, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e,
+	0x6f, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18,
+	0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65,
+	0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d,
+	0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a,
+	0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72,
+	0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
+	0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72,
+	0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43,
+	0x6f, 0x75, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0b,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d,
+	0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65,
+	0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06,
+	0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65,
+	0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0c,
+	0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x45, 0x76, 0x65,
+	0x6e, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b,
+	0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+	0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72,
+	0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b,
+	0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x70,
+	0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e,
+	0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x69, 0x6e,
+	0x6b, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
+	0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+	0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72,
+	0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73,
+	0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0xc4, 0x01, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12,
+	0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e,
+	0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69,
+	0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74,
+	0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e,
+	0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65,
+	0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
+	0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74,
+	0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04,
+	0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74,
+	0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0xf4, 0x01, 0x0a,
+	0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69,
+	0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64,
+	0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61,
+	0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
+	0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74,
+	0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27,
+	0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b,
+	0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75,
+	0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61,
+	0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
+	0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74,
+	0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a,
+	0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x07, 0x52, 0x05, 0x66, 0x6c,
+	0x61, 0x67, 0x73, 0x22, 0x99, 0x01, 0x0a, 0x08, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64,
+	0x12, 0x19, 0x0a, 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e,
+	0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53,
+	0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41,
+	0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44,
+	0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41,
+	0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12,
+	0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x50, 0x52, 0x4f,
+	0x44, 0x55, 0x43, 0x45, 0x52, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f,
+	0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x53, 0x55, 0x4d, 0x45, 0x52, 0x10, 0x05, 0x22,
+	0xbd, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65,
+	0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73,
+	0x73, 0x61, 0x67, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01,
+	0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+	0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76,
+	0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43,
+	0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x4e, 0x0a, 0x0a, 0x53, 0x74, 0x61,
+	0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55,
+	0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x45, 0x54, 0x10, 0x00, 0x12, 0x12,
+	0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x4f, 0x4b,
+	0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44,
+	0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x2a,
+	0x48, 0x0a, 0x09, 0x53, 0x70, 0x61, 0x6e, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x19, 0x0a, 0x15,
+	0x53, 0x50, 0x41, 0x4e, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x44, 0x4f, 0x5f, 0x4e, 0x4f,
+	0x54, 0x5f, 0x55, 0x53, 0x45, 0x10, 0x00, 0x12, 0x20, 0x0a, 0x1b, 0x53, 0x50, 0x41, 0x4e, 0x5f,
+	0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x46, 0x4c, 0x41, 0x47,
+	0x53, 0x5f, 0x4d, 0x41, 0x53, 0x4b, 0x10, 0xff, 0x01, 0x42, 0x77, 0x0a, 0x1f, 0x69, 0x6f, 0x2e,
+	0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x54, 0x72,
+	0x61, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x6f, 0x2e, 0x6f,
+	0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65,
+	0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1c, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+	0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x2e,
+	0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_opentelemetry_proto_trace_v1_trace_proto_rawDescOnce sync.Once
+	file_opentelemetry_proto_trace_v1_trace_proto_rawDescData = file_opentelemetry_proto_trace_v1_trace_proto_rawDesc
+)
+
+func file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP() []byte {
+	file_opentelemetry_proto_trace_v1_trace_proto_rawDescOnce.Do(func() {
+		file_opentelemetry_proto_trace_v1_trace_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_trace_v1_trace_proto_rawDescData)
+	})
+	return file_opentelemetry_proto_trace_v1_trace_proto_rawDescData
+}
+
+var file_opentelemetry_proto_trace_v1_trace_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
+var file_opentelemetry_proto_trace_v1_trace_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
+var file_opentelemetry_proto_trace_v1_trace_proto_goTypes = []interface{}{
+	(SpanFlags)(0),                   // 0: opentelemetry.proto.trace.v1.SpanFlags
+	(Span_SpanKind)(0),               // 1: opentelemetry.proto.trace.v1.Span.SpanKind
+	(Status_StatusCode)(0),           // 2: opentelemetry.proto.trace.v1.Status.StatusCode
+	(*TracesData)(nil),               // 3: opentelemetry.proto.trace.v1.TracesData
+	(*ResourceSpans)(nil),            // 4: opentelemetry.proto.trace.v1.ResourceSpans
+	(*ScopeSpans)(nil),               // 5: opentelemetry.proto.trace.v1.ScopeSpans
+	(*Span)(nil),                     // 6: opentelemetry.proto.trace.v1.Span
+	(*Status)(nil),                   // 7: opentelemetry.proto.trace.v1.Status
+	(*Span_Event)(nil),               // 8: opentelemetry.proto.trace.v1.Span.Event
+	(*Span_Link)(nil),                // 9: opentelemetry.proto.trace.v1.Span.Link
+	(*v1.Resource)(nil),              // 10: opentelemetry.proto.resource.v1.Resource
+	(*v11.InstrumentationScope)(nil), // 11: opentelemetry.proto.common.v1.InstrumentationScope
+	(*v11.KeyValue)(nil),             // 12: opentelemetry.proto.common.v1.KeyValue
+}
+var file_opentelemetry_proto_trace_v1_trace_proto_depIdxs = []int32{
+	4,  // 0: opentelemetry.proto.trace.v1.TracesData.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans
+	10, // 1: opentelemetry.proto.trace.v1.ResourceSpans.resource:type_name -> opentelemetry.proto.resource.v1.Resource
+	5,  // 2: opentelemetry.proto.trace.v1.ResourceSpans.scope_spans:type_name -> opentelemetry.proto.trace.v1.ScopeSpans
+	11, // 3: opentelemetry.proto.trace.v1.ScopeSpans.scope:type_name -> opentelemetry.proto.common.v1.InstrumentationScope
+	6,  // 4: opentelemetry.proto.trace.v1.ScopeSpans.spans:type_name -> opentelemetry.proto.trace.v1.Span
+	1,  // 5: opentelemetry.proto.trace.v1.Span.kind:type_name -> opentelemetry.proto.trace.v1.Span.SpanKind
+	12, // 6: opentelemetry.proto.trace.v1.Span.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+	8,  // 7: opentelemetry.proto.trace.v1.Span.events:type_name -> opentelemetry.proto.trace.v1.Span.Event
+	9,  // 8: opentelemetry.proto.trace.v1.Span.links:type_name -> opentelemetry.proto.trace.v1.Span.Link
+	7,  // 9: opentelemetry.proto.trace.v1.Span.status:type_name -> opentelemetry.proto.trace.v1.Status
+	2,  // 10: opentelemetry.proto.trace.v1.Status.code:type_name -> opentelemetry.proto.trace.v1.Status.StatusCode
+	12, // 11: opentelemetry.proto.trace.v1.Span.Event.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+	12, // 12: opentelemetry.proto.trace.v1.Span.Link.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+	13, // [13:13] is the sub-list for method output_type
+	13, // [13:13] is the sub-list for method input_type
+	13, // [13:13] is the sub-list for extension type_name
+	13, // [13:13] is the sub-list for extension extendee
+	0,  // [0:13] is the sub-list for field type_name
+}
+
+func init() { file_opentelemetry_proto_trace_v1_trace_proto_init() }
+func file_opentelemetry_proto_trace_v1_trace_proto_init() {
+	if File_opentelemetry_proto_trace_v1_trace_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*TracesData); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ResourceSpans); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ScopeSpans); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Span); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Status); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Span_Event); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Span_Link); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_opentelemetry_proto_trace_v1_trace_proto_rawDesc,
+			NumEnums:      3,
+			NumMessages:   7,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_opentelemetry_proto_trace_v1_trace_proto_goTypes,
+		DependencyIndexes: file_opentelemetry_proto_trace_v1_trace_proto_depIdxs,
+		EnumInfos:         file_opentelemetry_proto_trace_v1_trace_proto_enumTypes,
+		MessageInfos:      file_opentelemetry_proto_trace_v1_trace_proto_msgTypes,
+	}.Build()
+	File_opentelemetry_proto_trace_v1_trace_proto = out.File
+	file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = nil
+	file_opentelemetry_proto_trace_v1_trace_proto_goTypes = nil
+	file_opentelemetry_proto_trace_v1_trace_proto_depIdxs = nil
+}
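
Editor's note: the generated file above only registers descriptors and reflection metadata; the exported types it backs are ordinary Go structs. A minimal, hypothetical sketch of using them follows — the import path is taken from the `go_package` option embedded in the raw descriptor, and the field values are arbitrary:

```go
package main

import (
	"fmt"

	tracev1 "go.opentelemetry.io/proto/otlp/trace/v1"
)

func main() {
	// Construct a minimal OTLP span using the generated types registered above.
	span := &tracev1.Span{
		Name: "GET /healthz",
		Kind: tracev1.Span_SPAN_KIND_SERVER,
	}
	fmt.Println(span.GetName(), span.GetKind()) // GET /healthz SPAN_KIND_SERVER
}
```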
diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go
index de67f938a14b4b089fac1e828fdeab35377d0b7c..3c57880d6979ace65723338f6a0afcde5c260b2a 100644
--- a/vendor/golang.org/x/net/html/token.go
+++ b/vendor/golang.org/x/net/html/token.go
@@ -910,9 +910,6 @@ func (z *Tokenizer) readTagAttrKey() {
 			return
 		}
 		switch c {
-		case ' ', '\n', '\r', '\t', '\f', '/':
-			z.pendingAttr[0].end = z.raw.end - 1
-			return
 		case '=':
 			if z.pendingAttr[0].start+1 == z.raw.end {
 				// WHATWG 13.2.5.32, if we see an equals sign before the attribute name
@@ -920,7 +917,9 @@ func (z *Tokenizer) readTagAttrKey() {
 				continue
 			}
 			fallthrough
-		case '>':
+		case ' ', '\n', '\r', '\t', '\f', '/', '>':
+			// WHATWG 13.2.5.33 Attribute name state
+			// We need to reconsume the char in the after attribute name state to support the / character
 			z.raw.end--
 			z.pendingAttr[0].end = z.raw.end
 			return
@@ -939,6 +938,11 @@ func (z *Tokenizer) readTagAttrVal() {
 	if z.err != nil {
 		return
 	}
+	if c == '/' {
+		// WHATWG 13.2.5.34 After attribute name state
+		// U+002F SOLIDUS (/) - Switch to the self-closing start tag state.
+		return
+	}
 	if c != '=' {
 		z.raw.end--
 		return
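
Editor's note: the change above makes the tokenizer reconsume a `/` that directly follows an attribute name, per the WHATWG "after attribute name" state. A hedged sketch of the observable effect, assuming this reading of the state machine is right (the input `<input required/>` is an illustrative example, not from the patch):

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// With the fix, the '/' after the bare attribute name ends the name and
	// switches to the self-closing start tag state, so the tag is reported
	// as self-closing with attribute key "required".
	z := html.NewTokenizer(strings.NewReader(`<input required/>`))
	tt := z.Next()
	name, _ := z.TagName()
	key, val, _ := z.TagAttr()
	fmt.Println(tt, string(name))   // SelfClosingTag input
	fmt.Printf("%q=%q\n", key, val) // "required"=""
}
```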
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index c1f6b90dc32f62e9f47ec218dabc60cecde8376d..e2b298d859344158f3b3d5f4fcf8ef05c67f6a00 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -1510,13 +1510,12 @@ func (mh *MetaHeadersFrame) checkPseudos() error {
 }
 
 func (fr *Framer) maxHeaderStringLen() int {
-	v := fr.maxHeaderListSize()
-	if uint32(int(v)) == v {
-		return int(v)
+	v := int(fr.maxHeaderListSize())
+	if v < 0 {
+		// If maxHeaderListSize overflows an int, use no limit (0).
+		return 0
 	}
-	// They had a crazy big number for MaxHeaderBytes anyway,
-	// so give them unlimited header lengths:
-	return 0
+	return v
 }
 
 // readMetaFrame returns 0 or more CONTINUATION frames from fr and
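
Editor's note: the rewritten `maxHeaderStringLen` relies on the conversion `int(uint32)` going negative when the value does not fit in a signed int (only possible where `int` is 32 bits). A standalone sketch of the same guard, with a hypothetical name:

```go
package main

import "fmt"

// intOrNoLimit mirrors the guard above. On 32-bit platforms int is 32 bits,
// so a uint32 close to MaxUint32 converts to a negative int; that case is
// mapped to 0, which the framer treats as "no limit".
func intOrNoLimit(v uint32) int {
	n := int(v)
	if n < 0 {
		return 0 // v overflowed int: no limit
	}
	return n
}

func main() {
	fmt.Println(intOrNoLimit(1 << 20)) // 1048576 on all platforms
}
```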
diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go
new file mode 100644
index 0000000000000000000000000000000000000000..fd8632444ec243612f21ef136ce20c7b38796ede
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/key.go
@@ -0,0 +1,205 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+// Package registry provides access to the Windows registry.
+//
+// Here is a simple example, opening a registry key and reading a string value from it.
+//
+//	k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer k.Close()
+//
+//	s, _, err := k.GetStringValue("SystemRoot")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("Windows system root is %q\n", s)
+package registry
+
+import (
+	"io"
+	"runtime"
+	"syscall"
+	"time"
+)
+
+const (
+	// Registry key security and access rights.
+	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx
+	// for details.
+	ALL_ACCESS         = 0xf003f
+	CREATE_LINK        = 0x00020
+	CREATE_SUB_KEY     = 0x00004
+	ENUMERATE_SUB_KEYS = 0x00008
+	EXECUTE            = 0x20019
+	NOTIFY             = 0x00010
+	QUERY_VALUE        = 0x00001
+	READ               = 0x20019
+	SET_VALUE          = 0x00002
+	WOW64_32KEY        = 0x00200
+	WOW64_64KEY        = 0x00100
+	WRITE              = 0x20006
+)
+
+// Key is a handle to an open Windows registry key.
+// Keys can be obtained by calling OpenKey; there are
+// also some predefined root keys such as CURRENT_USER.
+// Keys can be used directly in the Windows API.
+type Key syscall.Handle
+
+const (
+	// Windows defines some predefined root keys that are always open.
+	// An application can use these keys as entry points to the registry.
+	// Normally these keys are used in OpenKey to open new keys,
+	// but they can also be used anywhere a Key is required.
+	CLASSES_ROOT     = Key(syscall.HKEY_CLASSES_ROOT)
+	CURRENT_USER     = Key(syscall.HKEY_CURRENT_USER)
+	LOCAL_MACHINE    = Key(syscall.HKEY_LOCAL_MACHINE)
+	USERS            = Key(syscall.HKEY_USERS)
+	CURRENT_CONFIG   = Key(syscall.HKEY_CURRENT_CONFIG)
+	PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA)
+)
+
+// Close closes open key k.
+func (k Key) Close() error {
+	return syscall.RegCloseKey(syscall.Handle(k))
+}
+
+// OpenKey opens a new key with path name relative to key k.
+// It accepts any open key, including CURRENT_USER and others,
+// and returns the new key and an error.
+// The access parameter specifies desired access rights to the
+// key to be opened.
+func OpenKey(k Key, path string, access uint32) (Key, error) {
+	p, err := syscall.UTF16PtrFromString(path)
+	if err != nil {
+		return 0, err
+	}
+	var subkey syscall.Handle
+	err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey)
+	if err != nil {
+		return 0, err
+	}
+	return Key(subkey), nil
+}
+
+// OpenRemoteKey opens a predefined registry key on another
+// computer pcname. The key to be opened is specified by k, but
+// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS.
+// If pcname is "", OpenRemoteKey returns the local computer key.
+func OpenRemoteKey(pcname string, k Key) (Key, error) {
+	var err error
+	var p *uint16
+	if pcname != "" {
+		p, err = syscall.UTF16PtrFromString(`\\` + pcname)
+		if err != nil {
+			return 0, err
+		}
+	}
+	var remoteKey syscall.Handle
+	err = regConnectRegistry(p, syscall.Handle(k), &remoteKey)
+	if err != nil {
+		return 0, err
+	}
+	return Key(remoteKey), nil
+}
+
+// ReadSubKeyNames returns the names of subkeys of key k.
+// The parameter n controls the number of returned names,
+// analogous to the way os.File.Readdirnames works.
+func (k Key) ReadSubKeyNames(n int) ([]string, error) {
+	// RegEnumKeyEx must be called repeatedly and to completion.
+	// During this time, this goroutine cannot migrate away from
+	// its current thread. See https://golang.org/issue/49320 and
+	// https://golang.org/issue/49466.
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	names := make([]string, 0)
+	// Registry key name size limit is 255 characters, as described at:
+	// https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx
+	buf := make([]uint16, 256) // plus extra room for the terminating zero
+loopItems:
+	for i := uint32(0); ; i++ {
+		if n > 0 {
+			if len(names) == n {
+				return names, nil
+			}
+		}
+		l := uint32(len(buf))
+		for {
+			err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil)
+			if err == nil {
+				break
+			}
+			if err == syscall.ERROR_MORE_DATA {
+				// Double buffer size and try again.
+				l = uint32(2 * len(buf))
+				buf = make([]uint16, l)
+				continue
+			}
+			if err == _ERROR_NO_MORE_ITEMS {
+				break loopItems
+			}
+			return names, err
+		}
+		names = append(names, syscall.UTF16ToString(buf[:l]))
+	}
+	if n > len(names) {
+		return names, io.EOF
+	}
+	return names, nil
+}
+
+// CreateKey creates a key named path under open key k.
+// CreateKey returns the new key and a boolean flag that reports
+// whether the key already existed.
+// The access parameter specifies the access rights for the key
+// to be created.
+func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) {
+	var h syscall.Handle
+	var d uint32
+	err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path),
+		0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d)
+	if err != nil {
+		return 0, false, err
+	}
+	return Key(h), d == _REG_OPENED_EXISTING_KEY, nil
+}
+
+// DeleteKey deletes the subkey path of key k and its values.
+func DeleteKey(k Key, path string) error {
+	return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path))
+}
+
+// A KeyInfo describes the statistics of a key. It is returned by Stat.
+type KeyInfo struct {
+	SubKeyCount     uint32
+	MaxSubKeyLen    uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte
+	ValueCount      uint32
+	MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte
+	MaxValueLen     uint32 // longest data component among the key's values, in bytes
+	lastWriteTime   syscall.Filetime
+}
+
+// ModTime returns the key's last write time.
+func (ki *KeyInfo) ModTime() time.Time {
+	return time.Unix(0, ki.lastWriteTime.Nanoseconds())
+}
+
+// Stat retrieves information about the open key k.
+func (k Key) Stat() (*KeyInfo, error) {
+	var ki KeyInfo
+	err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil,
+		&ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount,
+		&ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime)
+	if err != nil {
+		return nil, err
+	}
+	return &ki, nil
+}
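
Editor's note: a hypothetical usage sketch for the vendored registry package above (Windows only). `SOFTWARE` is an arbitrary example key; passing n <= 0 to ReadSubKeyNames reads all names, mirroring os.File.Readdirnames:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows/registry"
)

func main() {
	// Enumerate the subkeys of HKLM\SOFTWARE with the minimal access right
	// that RegEnumKeyEx requires.
	k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE`, registry.ENUMERATE_SUB_KEYS)
	if err != nil {
		log.Fatal(err)
	}
	defer k.Close()

	names, err := k.ReadSubKeyNames(-1)
	if err != nil {
		log.Fatal(err)
	}
	for _, name := range names {
		fmt.Println(name)
	}
}
```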
diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go
new file mode 100644
index 0000000000000000000000000000000000000000..bbf86ccf0c05f774361a9d606c6384e1f49164ae
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go
@@ -0,0 +1,9 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build generate
+
+package registry
+
+//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go syscall.go
diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go
new file mode 100644
index 0000000000000000000000000000000000000000..f533091c19ef16f174807c445600cd2b019e7210
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/syscall.go
@@ -0,0 +1,32 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package registry
+
+import "syscall"
+
+const (
+	_REG_OPTION_NON_VOLATILE = 0
+
+	_REG_CREATED_NEW_KEY     = 1
+	_REG_OPENED_EXISTING_KEY = 2
+
+	_ERROR_NO_MORE_ITEMS syscall.Errno = 259
+)
+
+func LoadRegLoadMUIString() error {
+	return procRegLoadMUIStringW.Find()
+}
+
+//sys	regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW
+//sys	regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW
+//sys	regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW
+//sys	regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW
+//sys	regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW
+//sys   regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW
+//sys	regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW
+
+//sys	expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW
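
Editor's note: `LoadRegLoadMUIString` above exists because `GetMUIStringValue` (added in value.go below) panics when `RegLoadMUIStringW` is unavailable. A hedged sketch of the intended probe-then-call pattern; the helper name and fallback choice are assumptions, not part of the package:

```go
package registryutil // hypothetical helper package, Windows only

import "golang.org/x/sys/windows/registry"

// readLocalizedString prefers the MUI-aware read when RegLoadMUIStringW is
// available and falls back to the raw string value otherwise.
func readLocalizedString(k registry.Key, name string) (string, error) {
	if err := registry.LoadRegLoadMUIString(); err != nil {
		// RegLoadMUIStringW is missing (pre-Vista): use the raw value.
		s, _, err := k.GetStringValue(name)
		return s, err
	}
	return k.GetMUIStringValue(name)
}
```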
diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go
new file mode 100644
index 0000000000000000000000000000000000000000..74db26b94dfaab59ad0dbe41f1b8b4a4faf88ceb
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/value.go
@@ -0,0 +1,386 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package registry
+
+import (
+	"errors"
+	"io"
+	"syscall"
+	"unicode/utf16"
+	"unsafe"
+)
+
+const (
+	// Registry value types.
+	NONE                       = 0
+	SZ                         = 1
+	EXPAND_SZ                  = 2
+	BINARY                     = 3
+	DWORD                      = 4
+	DWORD_BIG_ENDIAN           = 5
+	LINK                       = 6
+	MULTI_SZ                   = 7
+	RESOURCE_LIST              = 8
+	FULL_RESOURCE_DESCRIPTOR   = 9
+	RESOURCE_REQUIREMENTS_LIST = 10
+	QWORD                      = 11
+)
+
+var (
+	// ErrShortBuffer is returned when the buffer was too short for the operation.
+	ErrShortBuffer = syscall.ERROR_MORE_DATA
+
+	// ErrNotExist is returned when a registry key or value does not exist.
+	ErrNotExist = syscall.ERROR_FILE_NOT_FOUND
+
+	// ErrUnexpectedType is returned by Get*Value when the value's type was unexpected.
+	ErrUnexpectedType = errors.New("unexpected key value type")
+)
+
+// GetValue retrieves the type and data for the specified value associated
+// with an open key k. It fills up buffer buf and returns the retrieved
+// byte count n. If buf is too small to fit the stored value it returns
+// ErrShortBuffer error along with the required buffer size n.
+// If no buffer is provided, GetValue returns the value's type only.
+// If the value does not exist, the error returned is ErrNotExist.
+//
+// GetValue is a low level function. If value's type is known, use the appropriate
+// Get*Value function instead.
+func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) {
+	pname, err := syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return 0, 0, err
+	}
+	var pbuf *byte
+	if len(buf) > 0 {
+		pbuf = (*byte)(unsafe.Pointer(&buf[0]))
+	}
+	l := uint32(len(buf))
+	err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l)
+	if err != nil {
+		return int(l), valtype, err
+	}
+	return int(l), valtype, nil
+}
+
+func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) {
+	p, err := syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return nil, 0, err
+	}
+	var t uint32
+	n := uint32(len(buf))
+	for {
+		err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n)
+		if err == nil {
+			return buf[:n], t, nil
+		}
+		if err != syscall.ERROR_MORE_DATA {
+			return nil, 0, err
+		}
+		if n <= uint32(len(buf)) {
+			return nil, 0, err
+		}
+		buf = make([]byte, n)
+	}
+}
+
+// GetStringValue retrieves the string value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetStringValue returns ErrNotExist.
+// If value is not SZ or EXPAND_SZ, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) {
+	data, typ, err2 := k.getValue(name, make([]byte, 64))
+	if err2 != nil {
+		return "", typ, err2
+	}
+	switch typ {
+	case SZ, EXPAND_SZ:
+	default:
+		return "", typ, ErrUnexpectedType
+	}
+	if len(data) == 0 {
+		return "", typ, nil
+	}
+	u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2]
+	return syscall.UTF16ToString(u), typ, nil
+}
+
+// GetMUIStringValue retrieves the localized string value for
+// the specified value name associated with an open key k.
+// If the value name doesn't exist or the localized string value
+// can't be resolved, GetMUIStringValue returns ErrNotExist.
+// GetMUIStringValue panics if the system doesn't support
+// regLoadMUIString; use LoadRegLoadMUIString to check if
+// regLoadMUIString is supported before calling this function.
+func (k Key) GetMUIStringValue(name string) (string, error) {
+	pname, err := syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return "", err
+	}
+
+	buf := make([]uint16, 1024)
+	var buflen uint32
+	var pdir *uint16
+
+	err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+	if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path
+
+		// Try to resolve the string value using the system directory as
+		// a DLL search path; this assumes the string value is of the form
+		// @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320.
+
+		// This approach works with tzres.dll but may have to be revised
+		// in the future to allow callers to provide custom search paths.
+
+		var s string
+		s, err = ExpandString("%SystemRoot%\\system32\\")
+		if err != nil {
+			return "", err
+		}
+		pdir, err = syscall.UTF16PtrFromString(s)
+		if err != nil {
+			return "", err
+		}
+
+		err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+	}
+
+	for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed
+		if buflen <= uint32(len(buf)) {
+			break // Buffer not growing, assume race; break
+		}
+		buf = make([]uint16, buflen)
+		err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+	}
+
+	if err != nil {
+		return "", err
+	}
+
+	return syscall.UTF16ToString(buf), nil
+}
+
+// ExpandString expands environment-variable strings and replaces
+// them with the values defined for the current user.
+// Use ExpandString to expand EXPAND_SZ strings.
+func ExpandString(value string) (string, error) {
+	if value == "" {
+		return "", nil
+	}
+	p, err := syscall.UTF16PtrFromString(value)
+	if err != nil {
+		return "", err
+	}
+	r := make([]uint16, 100)
+	for {
+		n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r)))
+		if err != nil {
+			return "", err
+		}
+		if n <= uint32(len(r)) {
+			return syscall.UTF16ToString(r[:n]), nil
+		}
+		r = make([]uint16, n)
+	}
+}
+
+// GetStringsValue retrieves the []string value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetStringsValue returns ErrNotExist.
+// If value is not MULTI_SZ, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) {
+	data, typ, err2 := k.getValue(name, make([]byte, 64))
+	if err2 != nil {
+		return nil, typ, err2
+	}
+	if typ != MULTI_SZ {
+		return nil, typ, ErrUnexpectedType
+	}
+	if len(data) == 0 {
+		return nil, typ, nil
+	}
+	p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2]
+	if len(p) == 0 {
+		return nil, typ, nil
+	}
+	if p[len(p)-1] == 0 {
+		p = p[:len(p)-1] // remove terminating null
+	}
+	val = make([]string, 0, 5)
+	from := 0
+	for i, c := range p {
+		if c == 0 {
+			val = append(val, string(utf16.Decode(p[from:i])))
+			from = i + 1
+		}
+	}
+	return val, typ, nil
+}
+
+// GetIntegerValue retrieves the integer value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetIntegerValue returns ErrNotExist.
+// If value is not DWORD or QWORD, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) {
+	data, typ, err2 := k.getValue(name, make([]byte, 8))
+	if err2 != nil {
+		return 0, typ, err2
+	}
+	switch typ {
+	case DWORD:
+		if len(data) != 4 {
+			return 0, typ, errors.New("DWORD value is not 4 bytes long")
+		}
+		var val32 uint32
+		copy((*[4]byte)(unsafe.Pointer(&val32))[:], data)
+		return uint64(val32), DWORD, nil
+	case QWORD:
+		if len(data) != 8 {
+			return 0, typ, errors.New("QWORD value is not 8 bytes long")
+		}
+		copy((*[8]byte)(unsafe.Pointer(&val))[:], data)
+		return val, QWORD, nil
+	default:
+		return 0, typ, ErrUnexpectedType
+	}
+}
+
+// GetBinaryValue retrieves the binary value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetBinaryValue returns ErrNotExist.
+// If value is not BINARY, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) {
+	data, typ, err2 := k.getValue(name, make([]byte, 64))
+	if err2 != nil {
+		return nil, typ, err2
+	}
+	if typ != BINARY {
+		return nil, typ, ErrUnexpectedType
+	}
+	return data, typ, nil
+}
+
+func (k Key) setValue(name string, valtype uint32, data []byte) error {
+	p, err := syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return err
+	}
+	if len(data) == 0 {
+		return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0)
+	}
+	return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data)))
+}
+
+// SetDWordValue sets the data and type of a name value
+// under key k to value and DWORD.
+func (k Key) SetDWordValue(name string, value uint32) error {
+	return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:])
+}
+
+// SetQWordValue sets the data and type of a name value
+// under key k to value and QWORD.
+func (k Key) SetQWordValue(name string, value uint64) error {
+	return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:])
+}
+
+func (k Key) setStringValue(name string, valtype uint32, value string) error {
+	v, err := syscall.UTF16FromString(value)
+	if err != nil {
+		return err
+	}
+	buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2]
+	return k.setValue(name, valtype, buf)
+}
+
+// SetStringValue sets the data and type of a name value
+// under key k to value and SZ. The value must not contain a zero byte.
+func (k Key) SetStringValue(name, value string) error {
+	return k.setStringValue(name, SZ, value)
+}
+
+// SetExpandStringValue sets the data and type of a name value
+// under key k to value and EXPAND_SZ. The value must not contain a zero byte.
+func (k Key) SetExpandStringValue(name, value string) error {
+	return k.setStringValue(name, EXPAND_SZ, value)
+}
+
+// SetStringsValue sets the data and type of a name value
+// under key k to value and MULTI_SZ. The value strings
+// must not contain a zero byte.
+func (k Key) SetStringsValue(name string, value []string) error {
+	ss := ""
+	for _, s := range value {
+		for i := 0; i < len(s); i++ {
+			if s[i] == 0 {
+				return errors.New("string cannot have 0 inside")
+			}
+		}
+		ss += s + "\x00"
+	}
+	v := utf16.Encode([]rune(ss + "\x00"))
+	buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2]
+	return k.setValue(name, MULTI_SZ, buf)
+}
+
+// SetBinaryValue sets the data and type of a name value
+// under key k to value and BINARY.
+func (k Key) SetBinaryValue(name string, value []byte) error {
+	return k.setValue(name, BINARY, value)
+}
+
+// DeleteValue removes a named value from the key k.
+func (k Key) DeleteValue(name string) error {
+	return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name))
+}
+
+// ReadValueNames returns the value names of key k.
+// The parameter n controls the number of returned names,
+// analogous to the way os.File.Readdirnames works.
+func (k Key) ReadValueNames(n int) ([]string, error) {
+	ki, err := k.Stat()
+	if err != nil {
+		return nil, err
+	}
+	names := make([]string, 0, ki.ValueCount)
+	buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character
+loopItems:
+	for i := uint32(0); ; i++ {
+		if n > 0 {
+			if len(names) == n {
+				return names, nil
+			}
+		}
+		l := uint32(len(buf))
+		for {
+			err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil)
+			if err == nil {
+				break
+			}
+			if err == syscall.ERROR_MORE_DATA {
+				// Double buffer size and try again.
+				l = uint32(2 * len(buf))
+				buf = make([]uint16, l)
+				continue
+			}
+			if err == _ERROR_NO_MORE_ITEMS {
+				break loopItems
+			}
+			return names, err
+		}
+		names = append(names, syscall.UTF16ToString(buf[:l]))
+	}
+	if n > len(names) {
+		return names, io.EOF
+	}
+	return names, nil
+}
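
Editor's note: GetStringValue returns EXPAND_SZ data unexpanded, and ExpandString is the documented way to expand it. A short sketch tying the two together; the helper name and the `"Path"` value name are hypothetical:

```go
package registryutil // hypothetical, Windows only

import "golang.org/x/sys/windows/registry"

// pathValue reads a string value and expands it if it is of type EXPAND_SZ.
func pathValue(k registry.Key) (string, error) {
	s, typ, err := k.GetStringValue("Path")
	if err != nil {
		return "", err
	}
	if typ == registry.EXPAND_SZ {
		return registry.ExpandString(s)
	}
	return s, nil
}
```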
diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..fc1835d8a233aaa12be8eaabb134e9ae1b420002
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
@@ -0,0 +1,117 @@
+// Code generated by 'go generate'; DO NOT EDIT.
+
+package registry
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+	errnoERROR_IO_PENDING = 997
+)
+
+var (
+	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+	errERROR_EINVAL     error = syscall.EINVAL
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+	switch e {
+	case 0:
+		return errERROR_EINVAL
+	case errnoERROR_IO_PENDING:
+		return errERROR_IO_PENDING
+	}
+	// TODO: add more here, after collecting data on the common
+	// error values seen on Windows. (perhaps when running
+	// all.bat?)
+	return e
+}
+
+var (
+	modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
+	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+
+	procRegConnectRegistryW       = modadvapi32.NewProc("RegConnectRegistryW")
+	procRegCreateKeyExW           = modadvapi32.NewProc("RegCreateKeyExW")
+	procRegDeleteKeyW             = modadvapi32.NewProc("RegDeleteKeyW")
+	procRegDeleteValueW           = modadvapi32.NewProc("RegDeleteValueW")
+	procRegEnumValueW             = modadvapi32.NewProc("RegEnumValueW")
+	procRegLoadMUIStringW         = modadvapi32.NewProc("RegLoadMUIStringW")
+	procRegSetValueExW            = modadvapi32.NewProc("RegSetValueExW")
+	procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW")
+)
+
+func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) {
+	r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result)))
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) {
+	r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)))
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) {
+	r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0)
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) {
+	r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0)
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) {
+	r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0)
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) {
+	r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0)
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) {
+	r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize))
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) {
+	r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
+	n = uint32(r0)
+	if n == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go
index f0e0cf3cb1dbcbaad91e962bd040c24c9d2d55bf..8f6c7f493f8e9a4edab8057dbc4a538db8f759da 100644
--- a/vendor/golang.org/x/time/rate/rate.go
+++ b/vendor/golang.org/x/time/rate/rate.go
@@ -52,6 +52,8 @@ func Every(interval time.Duration) Limit {
 // or its associated context.Context is canceled.
 //
 // The methods AllowN, ReserveN, and WaitN consume n tokens.
+//
+// Limiter is safe for simultaneous use by multiple goroutines.
 type Limiter struct {
 	mu     sync.Mutex
 	limit  Limit
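
Editor's note: the new doc line states the concurrency guarantee explicitly. A minimal sketch of what it permits — one Limiter shared by several goroutines with no extra locking (the rate and burst values here are arbitrary):

```go
package main

import (
	"context"
	"sync"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// 10 events/sec with a burst of 5, shared across goroutines.
	lim := rate.NewLimiter(rate.Every(100*time.Millisecond), 5)
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = lim.Wait(context.Background()) // safe for concurrent use
		}()
	}
	wg.Wait()
}
```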
diff --git a/vendor/google.golang.org/genproto/googleapis/api/LICENSE b/vendor/google.golang.org/genproto/googleapis/api/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..3543268f84ec7a7deb8af4cc49c55a037505deb8
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
@@ -0,0 +1,235 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v3.21.9
+// source: google/api/httpbody.proto
+
+package httpbody
+
+import (
+	reflect "reflect"
+	sync "sync"
+
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	anypb "google.golang.org/protobuf/types/known/anypb"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Message that represents an arbitrary HTTP body. It should only be used for
+// payload formats that can't be represented as JSON, such as raw binary or
+// an HTML page.
+//
+// This message can be used both in streaming and non-streaming API methods in
+// the request as well as the response.
+//
+// It can be used as a top-level request field, which is convenient if one
+// wants to extract parameters from either the URL or HTTP template into the
+// request fields and also want access to the raw HTTP body.
+//
+// Example:
+//
+//	message GetResourceRequest {
+//	  // A unique request id.
+//	  string request_id = 1;
+//
+//	  // The raw HTTP body is bound to this field.
+//	  google.api.HttpBody http_body = 2;
+//
+//	}
+//
+//	service ResourceService {
+//	  rpc GetResource(GetResourceRequest)
+//	    returns (google.api.HttpBody);
+//	  rpc UpdateResource(google.api.HttpBody)
+//	    returns (google.protobuf.Empty);
+//
+//	}
+//
+// Example with streaming methods:
+//
+//	service CaldavService {
+//	  rpc GetCalendar(stream google.api.HttpBody)
+//	    returns (stream google.api.HttpBody);
+//	  rpc UpdateCalendar(stream google.api.HttpBody)
+//	    returns (stream google.api.HttpBody);
+//
+//	}
+//
+// Use of this type only changes how the request and response bodies are
+// handled, all other features will continue to work unchanged.
+type HttpBody struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The HTTP Content-Type header value specifying the content type of the body.
+	ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"`
+	// The HTTP request/response body as raw binary.
+	Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	// Application specific response metadata. Must be set in the first response
+	// for streaming APIs.
+	Extensions []*anypb.Any `protobuf:"bytes,3,rep,name=extensions,proto3" json:"extensions,omitempty"`
+}
+
+func (x *HttpBody) Reset() {
+	*x = HttpBody{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_httpbody_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *HttpBody) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HttpBody) ProtoMessage() {}
+
+func (x *HttpBody) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_httpbody_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use HttpBody.ProtoReflect.Descriptor instead.
+func (*HttpBody) Descriptor() ([]byte, []int) {
+	return file_google_api_httpbody_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *HttpBody) GetContentType() string {
+	if x != nil {
+		return x.ContentType
+	}
+	return ""
+}
+
+func (x *HttpBody) GetData() []byte {
+	if x != nil {
+		return x.Data
+	}
+	return nil
+}
+
+func (x *HttpBody) GetExtensions() []*anypb.Any {
+	if x != nil {
+		return x.Extensions
+	}
+	return nil
+}
+
+var File_google_api_httpbody_proto protoreflect.FileDescriptor
+
+var file_google_api_httpbody_proto_rawDesc = []byte{
+	0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74,
+	0x70, 0x62, 0x6f, 0x64, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x22, 0x77, 0x0a, 0x08, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21,
+	0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70,
+	0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52,
+	0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+	0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52,
+	0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x68, 0x0a, 0x0e, 0x63,
+	0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x48,
+	0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72,
+	0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f,
+	0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02,
+	0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_api_httpbody_proto_rawDescOnce sync.Once
+	file_google_api_httpbody_proto_rawDescData = file_google_api_httpbody_proto_rawDesc
+)
+
+func file_google_api_httpbody_proto_rawDescGZIP() []byte {
+	file_google_api_httpbody_proto_rawDescOnce.Do(func() {
+		file_google_api_httpbody_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_httpbody_proto_rawDescData)
+	})
+	return file_google_api_httpbody_proto_rawDescData
+}
+
+var file_google_api_httpbody_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_api_httpbody_proto_goTypes = []interface{}{
+	(*HttpBody)(nil),  // 0: google.api.HttpBody
+	(*anypb.Any)(nil), // 1: google.protobuf.Any
+}
+var file_google_api_httpbody_proto_depIdxs = []int32{
+	1, // 0: google.api.HttpBody.extensions:type_name -> google.protobuf.Any
+	1, // [1:1] is the sub-list for method output_type
+	1, // [1:1] is the sub-list for method input_type
+	1, // [1:1] is the sub-list for extension type_name
+	1, // [1:1] is the sub-list for extension extendee
+	0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_google_api_httpbody_proto_init() }
+func file_google_api_httpbody_proto_init() {
+	if File_google_api_httpbody_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_api_httpbody_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*HttpBody); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_api_httpbody_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   1,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_api_httpbody_proto_goTypes,
+		DependencyIndexes: file_google_api_httpbody_proto_depIdxs,
+		MessageInfos:      file_google_api_httpbody_proto_msgTypes,
+	}.Build()
+	File_google_api_httpbody_proto = out.File
+	file_google_api_httpbody_proto_rawDesc = nil
+	file_google_api_httpbody_proto_goTypes = nil
+	file_google_api_httpbody_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..7bd161e48ad7ab3389e504c3f0e92994a4e63403
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
@@ -0,0 +1,1314 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v3.21.9
+// source: google/rpc/error_details.proto
+
+package errdetails
+
+import (
+	reflect "reflect"
+	sync "sync"
+
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	durationpb "google.golang.org/protobuf/types/known/durationpb"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Describes the cause of the error with structured details.
+//
+// Example of an error when contacting the "pubsub.googleapis.com" API when it
+// is not enabled:
+//
+//	{ "reason": "API_DISABLED"
+//	  "domain": "googleapis.com"
+//	  "metadata": {
+//	    "resource": "projects/123",
+//	    "service": "pubsub.googleapis.com"
+//	  }
+//	}
+//
+// This response indicates that the pubsub.googleapis.com API is not enabled.
+//
+// Example of an error that is returned when attempting to create a Spanner
+// instance in a region that is out of stock:
+//
+//	{ "reason": "STOCKOUT"
+//	  "domain": "spanner.googleapis.com",
+//	  "metadata": {
+//	    "availableRegions": "us-central1,us-east2"
+//	  }
+//	}
+type ErrorInfo struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The reason of the error. This is a constant value that identifies the
+	// proximate cause of the error. Error reasons are unique within a particular
+	// domain of errors. This should be at most 63 characters and match a
+	// regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, which represents
+	// UPPER_SNAKE_CASE.
+	Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"`
+	// The logical grouping to which the "reason" belongs. The error domain
+	// is typically the registered service name of the tool or product that
+	// generates the error. Example: "pubsub.googleapis.com". If the error is
+	// generated by some common infrastructure, the error domain must be a
+	// globally unique value that identifies the infrastructure. For Google API
+	// infrastructure, the error domain is "googleapis.com".
+	Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"`
+	// Additional structured details about this error.
+	//
+	// Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in
+	// length. When identifying the current value of an exceeded limit, the units
+	// should be contained in the key, not the value.  For example, rather than
+	// returning {"instanceLimit": "100/request"}, the service should return
+	// {"instanceLimitPerRequest": "100"} if the client exceeds the number of
+	// instances that can be created in a single (batch) request.
+	Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *ErrorInfo) Reset() {
+	*x = ErrorInfo{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ErrorInfo) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ErrorInfo) ProtoMessage() {}
+
+func (x *ErrorInfo) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ErrorInfo.ProtoReflect.Descriptor instead.
+func (*ErrorInfo) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ErrorInfo) GetReason() string {
+	if x != nil {
+		return x.Reason
+	}
+	return ""
+}
+
+func (x *ErrorInfo) GetDomain() string {
+	if x != nil {
+		return x.Domain
+	}
+	return ""
+}
+
+func (x *ErrorInfo) GetMetadata() map[string]string {
+	if x != nil {
+		return x.Metadata
+	}
+	return nil
+}
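+
+// Editorial sketch (not produced by protoc-gen-go): a client can recover an
+// ErrorInfo attached to a gRPC error through the status package. The imports
+// ("google.golang.org/grpc/status", "log") and the err value are assumed.
+//
+//	if st, ok := status.FromError(err); ok {
+//		for _, d := range st.Details() {
+//			if info, ok := d.(*ErrorInfo); ok {
+//				log.Printf("reason=%s domain=%s metadata=%v",
+//					info.GetReason(), info.GetDomain(), info.GetMetadata())
+//			}
+//		}
+//	}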
+
+// Describes when the clients can retry a failed request. Clients could ignore
+// the recommendation here or retry when this information is missing from error
+// responses.
+//
+// It's always recommended that clients use exponential backoff when
+// retrying.
+//
+// Clients should wait until `retry_delay` amount of time has passed since
+// receiving the error response before retrying.  If retrying requests also
+// fail, clients should use an exponential backoff scheme to gradually increase
+// the delay between retries based on `retry_delay`, until either a maximum
+// number of retries have been reached or a maximum retry delay cap has been
+// reached.
+type RetryInfo struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Clients should wait at least this long between retrying the same request.
+	RetryDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=retry_delay,json=retryDelay,proto3" json:"retry_delay,omitempty"`
+}
+
+func (x *RetryInfo) Reset() {
+	*x = RetryInfo{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *RetryInfo) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RetryInfo) ProtoMessage() {}
+
+func (x *RetryInfo) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use RetryInfo.ProtoReflect.Descriptor instead.
+func (*RetryInfo) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *RetryInfo) GetRetryDelay() *durationpb.Duration {
+	if x != nil {
+		return x.RetryDelay
+	}
+	return nil
+}
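+
+// Editorial sketch (not produced by protoc-gen-go): honoring the
+// server-provided delay before a retry. Assumes "google.golang.org/grpc/status"
+// and "time" are imported and that err came from a failed RPC; real clients
+// would layer exponential backoff on top of this floor, as described above.
+//
+//	if st, ok := status.FromError(err); ok {
+//		for _, d := range st.Details() {
+//			if ri, ok := d.(*RetryInfo); ok {
+//				time.Sleep(ri.GetRetryDelay().AsDuration())
+//			}
+//		}
+//	}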
+
+// Describes additional debugging info.
+type DebugInfo struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The stack trace entries indicating where the error occurred.
+	StackEntries []string `protobuf:"bytes,1,rep,name=stack_entries,json=stackEntries,proto3" json:"stack_entries,omitempty"`
+	// Additional debugging information provided by the server.
+	Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"`
+}
+
+func (x *DebugInfo) Reset() {
+	*x = DebugInfo{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *DebugInfo) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DebugInfo) ProtoMessage() {}
+
+func (x *DebugInfo) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use DebugInfo.ProtoReflect.Descriptor instead.
+func (*DebugInfo) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *DebugInfo) GetStackEntries() []string {
+	if x != nil {
+		return x.StackEntries
+	}
+	return nil
+}
+
+func (x *DebugInfo) GetDetail() string {
+	if x != nil {
+		return x.Detail
+	}
+	return ""
+}
+
+// Describes how a quota check failed.
+//
+// For example, if a daily limit was exceeded for the calling project,
+// a service could respond with a QuotaFailure detail containing the project
+// id and the description of the quota limit that was exceeded.  If the
+// calling project hasn't enabled the service in the developer console, then
+// a service could respond with the project id and set `service_disabled`
+// to true.
+//
+// Also see RetryInfo and Help types for other details about handling a
+// quota failure.
+type QuotaFailure struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Describes all quota violations.
+	Violations []*QuotaFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"`
+}
+
+func (x *QuotaFailure) Reset() {
+	*x = QuotaFailure{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *QuotaFailure) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QuotaFailure) ProtoMessage() {}
+
+func (x *QuotaFailure) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use QuotaFailure.ProtoReflect.Descriptor instead.
+func (*QuotaFailure) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation {
+	if x != nil {
+		return x.Violations
+	}
+	return nil
+}
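+
+// Editorial sketch (not produced by protoc-gen-go): a server attaching a
+// QuotaFailure to a RESOURCE_EXHAUSTED status. Assumes
+// "google.golang.org/grpc/codes" and "google.golang.org/grpc/status" are
+// imported; WithDetails returns a new status carrying the detail message.
+//
+//	st := status.New(codes.ResourceExhausted, "daily quota exceeded")
+//	st, _ = st.WithDetails(&QuotaFailure{
+//		Violations: []*QuotaFailure_Violation{{
+//			Subject:     "project:example-project",
+//			Description: "Daily Limit for read operations exceeded",
+//		}},
+//	})
+//	return nil, st.Err()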
+
+// Describes what preconditions have failed.
+//
+// For example, if an RPC failed because it required the Terms of Service to be
+// acknowledged, it could list the terms of service violation in the
+// PreconditionFailure message.
+type PreconditionFailure struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Describes all precondition violations.
+	Violations []*PreconditionFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"`
+}
+
+func (x *PreconditionFailure) Reset() {
+	*x = PreconditionFailure{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *PreconditionFailure) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PreconditionFailure) ProtoMessage() {}
+
+func (x *PreconditionFailure) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use PreconditionFailure.ProtoReflect.Descriptor instead.
+func (*PreconditionFailure) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *PreconditionFailure) GetViolations() []*PreconditionFailure_Violation {
+	if x != nil {
+		return x.Violations
+	}
+	return nil
+}
+
+// Describes violations in a client request. This error type focuses on the
+// syntactic aspects of the request.
+type BadRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Describes all violations in a client request.
+	FieldViolations []*BadRequest_FieldViolation `protobuf:"bytes,1,rep,name=field_violations,json=fieldViolations,proto3" json:"field_violations,omitempty"`
+}
+
+func (x *BadRequest) Reset() {
+	*x = BadRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BadRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BadRequest) ProtoMessage() {}
+
+func (x *BadRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BadRequest.ProtoReflect.Descriptor instead.
+func (*BadRequest) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *BadRequest) GetFieldViolations() []*BadRequest_FieldViolation {
+	if x != nil {
+		return x.FieldViolations
+	}
+	return nil
+}
+
+// Contains metadata about the request that clients can attach when filing a bug
+// or providing other forms of feedback.
+type RequestInfo struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// An opaque string that should only be interpreted by the service generating
+	// it. For example, it can be used to identify requests in the service's logs.
+	RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
+	// Any data that was used to serve this request. For example, an encrypted
+	// stack trace that can be sent back to the service provider for debugging.
+	ServingData string `protobuf:"bytes,2,opt,name=serving_data,json=servingData,proto3" json:"serving_data,omitempty"`
+}
+
+func (x *RequestInfo) Reset() {
+	*x = RequestInfo{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *RequestInfo) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RequestInfo) ProtoMessage() {}
+
+func (x *RequestInfo) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use RequestInfo.ProtoReflect.Descriptor instead.
+func (*RequestInfo) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *RequestInfo) GetRequestId() string {
+	if x != nil {
+		return x.RequestId
+	}
+	return ""
+}
+
+func (x *RequestInfo) GetServingData() string {
+	if x != nil {
+		return x.ServingData
+	}
+	return ""
+}
+
+// Describes the resource that is being accessed.
+type ResourceInfo struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A name for the type of resource being accessed, e.g. "sql table",
+	// "cloud storage bucket", "file", "Google calendar"; or the type URL
+	// of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic".
+	ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"`
+	// The name of the resource being accessed.  For example, a shared calendar
+	// name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current
+	// error is
+	// [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED].
+	ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"`
+	// The owner of the resource (optional).
+	// For example, "user:<owner email>" or "project:<Google developer project
+	// id>".
+	Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"`
+	// Describes what error is encountered when accessing this resource.
+	// For example, updating a cloud project may require the `writer` permission
+	// on the developer console project.
+	Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *ResourceInfo) Reset() {
+	*x = ResourceInfo{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ResourceInfo) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceInfo) ProtoMessage() {}
+
+func (x *ResourceInfo) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceInfo.ProtoReflect.Descriptor instead.
+func (*ResourceInfo) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *ResourceInfo) GetResourceType() string {
+	if x != nil {
+		return x.ResourceType
+	}
+	return ""
+}
+
+func (x *ResourceInfo) GetResourceName() string {
+	if x != nil {
+		return x.ResourceName
+	}
+	return ""
+}
+
+func (x *ResourceInfo) GetOwner() string {
+	if x != nil {
+		return x.Owner
+	}
+	return ""
+}
+
+func (x *ResourceInfo) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+// Provides links to documentation or for performing an out-of-band action.
+//
+// For example, if a quota check failed with an error indicating the calling
+// project hasn't enabled the accessed service, this can contain a URL pointing
+// directly to the right place in the developer console to flip the bit.
+type Help struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// URL(s) pointing to additional information on handling the current error.
+	Links []*Help_Link `protobuf:"bytes,1,rep,name=links,proto3" json:"links,omitempty"`
+}
+
+func (x *Help) Reset() {
+	*x = Help{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Help) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Help) ProtoMessage() {}
+
+func (x *Help) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[8]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Help.ProtoReflect.Descriptor instead.
+func (*Help) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *Help) GetLinks() []*Help_Link {
+	if x != nil {
+		return x.Links
+	}
+	return nil
+}
+
+// Provides a localized error message that is safe to return to the user and
+// that can be attached to an RPC error.
+type LocalizedMessage struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The locale of the message, following the specification defined at
+	// https://www.rfc-editor.org/rfc/bcp/bcp47.txt.
+	// Examples are: "en-US", "fr-CH", "es-MX"
+	Locale string `protobuf:"bytes,1,opt,name=locale,proto3" json:"locale,omitempty"`
+	// The localized error message in the above locale.
+	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+}
+
+func (x *LocalizedMessage) Reset() {
+	*x = LocalizedMessage{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[9]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *LocalizedMessage) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LocalizedMessage) ProtoMessage() {}
+
+func (x *LocalizedMessage) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[9]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use LocalizedMessage.ProtoReflect.Descriptor instead.
+func (*LocalizedMessage) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *LocalizedMessage) GetLocale() string {
+	if x != nil {
+		return x.Locale
+	}
+	return ""
+}
+
+func (x *LocalizedMessage) GetMessage() string {
+	if x != nil {
+		return x.Message
+	}
+	return ""
+}
+
+// A message type used to describe a single quota violation.  For example, a
+// daily quota or a custom quota that was exceeded.
+type QuotaFailure_Violation struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The subject on which the quota check failed.
+	// For example, "clientip:<ip address of client>" or "project:<Google
+	// developer project id>".
+	Subject string `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"`
+	// A description of how the quota check failed. Clients can use this
+	// description to find more about the quota configuration in the service's
+	// public documentation, or find the relevant quota limit to adjust through
+	// the developer console.
+	//
+	// For example: "Service disabled" or "Daily Limit for read operations
+	// exceeded".
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *QuotaFailure_Violation) Reset() {
+	*x = QuotaFailure_Violation{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[11]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *QuotaFailure_Violation) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QuotaFailure_Violation) ProtoMessage() {}
+
+func (x *QuotaFailure_Violation) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[11]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use QuotaFailure_Violation.ProtoReflect.Descriptor instead.
+func (*QuotaFailure_Violation) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *QuotaFailure_Violation) GetSubject() string {
+	if x != nil {
+		return x.Subject
+	}
+	return ""
+}
+
+func (x *QuotaFailure_Violation) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+// A message type used to describe a single precondition failure.
+type PreconditionFailure_Violation struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The type of PreconditionFailure. We recommend using a service-specific
+	// enum type to define the supported precondition violation subjects. For
+	// example, "TOS" for "Terms of Service violation".
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	// The subject, relative to the type, that failed.
+	// For example, "google.com/cloud" relative to the "TOS" type would indicate
+	// which terms-of-service document is being referenced.
+	Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"`
+	// A description of how the precondition failed. Developers can use this
+	// description to understand how to fix the failure.
+	//
+	// For example: "Terms of service not accepted".
+	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *PreconditionFailure_Violation) Reset() {
+	*x = PreconditionFailure_Violation{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[12]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *PreconditionFailure_Violation) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PreconditionFailure_Violation) ProtoMessage() {}
+
+func (x *PreconditionFailure_Violation) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[12]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use PreconditionFailure_Violation.ProtoReflect.Descriptor instead.
+func (*PreconditionFailure_Violation) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{4, 0}
+}
+
+func (x *PreconditionFailure_Violation) GetType() string {
+	if x != nil {
+		return x.Type
+	}
+	return ""
+}
+
+func (x *PreconditionFailure_Violation) GetSubject() string {
+	if x != nil {
+		return x.Subject
+	}
+	return ""
+}
+
+func (x *PreconditionFailure_Violation) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+// A message type used to describe a single bad request field.
+type BadRequest_FieldViolation struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A path that leads to a field in the request body. The value will be a
+	// sequence of dot-separated identifiers that identify a protocol buffer
+	// field.
+	//
+	// Consider the following:
+	//
+	//	message CreateContactRequest {
+	//	  message EmailAddress {
+	//	    enum Type {
+	//	      TYPE_UNSPECIFIED = 0;
+	//	      HOME = 1;
+	//	      WORK = 2;
+	//	    }
+	//
+	//	    optional string email = 1;
+	//	    repeated Type type = 2;
+	//	  }
+	//
+	//	  string full_name = 1;
+	//	  repeated EmailAddress email_addresses = 2;
+	//	}
+	//
+	// In this example, in proto `field` could take one of the following values:
+	//
+	//   - `full_name` for a violation in the `full_name` value
+	//   - `email_addresses[1].email` for a violation in the `email` field of the
+	//     first `email_addresses` message
+	//   - `email_addresses[3].type[2]` for a violation in the second `type`
+	//     value in the third `email_addresses` message.
+	//
+	// In JSON, the same values are represented as:
+	//
+	//   - `fullName` for a violation in the `fullName` value
+	//   - `emailAddresses[1].email` for a violation in the `email` field of the
+	//     first `emailAddresses` message
+	//   - `emailAddresses[3].type[2]` for a violation in the second `type`
+	//     value in the third `emailAddresses` message.
+	Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"`
+	// A description of why the request element is bad.
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *BadRequest_FieldViolation) Reset() {
+	*x = BadRequest_FieldViolation{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[13]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BadRequest_FieldViolation) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BadRequest_FieldViolation) ProtoMessage() {}
+
+func (x *BadRequest_FieldViolation) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[13]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BadRequest_FieldViolation.ProtoReflect.Descriptor instead.
+func (*BadRequest_FieldViolation) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{5, 0}
+}
+
+func (x *BadRequest_FieldViolation) GetField() string {
+	if x != nil {
+		return x.Field
+	}
+	return ""
+}
+
+func (x *BadRequest_FieldViolation) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
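+
+// Editorial sketch (not produced by protoc-gen-go): building a FieldViolation
+// whose Field follows the dot-separated path syntax documented above; the
+// resulting BadRequest can then be attached to an INVALID_ARGUMENT status
+// with WithDetails, as in the QuotaFailure example earlier in this file.
+//
+//	br := &BadRequest{
+//		FieldViolations: []*BadRequest_FieldViolation{{
+//			Field:       "email_addresses[0].email",
+//			Description: "value is not a valid email address",
+//		}},
+//	}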
+
+// Describes a URL link.
+type Help_Link struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Describes what the link offers.
+	Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	// The URL of the link.
+	Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+}
+
+func (x *Help_Link) Reset() {
+	*x = Help_Link{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[14]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Help_Link) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Help_Link) ProtoMessage() {}
+
+func (x *Help_Link) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[14]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Help_Link.ProtoReflect.Descriptor instead.
+func (*Help_Link) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8, 0}
+}
+
+func (x *Help_Link) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Help_Link) GetUrl() string {
+	if x != nil {
+		return x.Url
+	}
+	return ""
+}
+
+var File_google_rpc_error_details_proto protoreflect.FileDescriptor
+
+var file_google_rpc_error_details_proto_rawDesc = []byte{
+	0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72,
+	0x6f, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x1e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75,
+	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb9, 0x01, 0x0a,
+	0x09, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65,
+	0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73,
+	0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x3f, 0x0a, 0x08, 0x6d, 0x65,
+	0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x49,
+	0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72,
+	0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d,
+	0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+	0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
+	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x47, 0x0a, 0x09, 0x52, 0x65, 0x74, 0x72,
+	0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3a, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x64,
+	0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x44, 0x65, 0x6c, 0x61,
+	0x79, 0x22, 0x48, 0x0a, 0x09, 0x44, 0x65, 0x62, 0x75, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23,
+	0x0a, 0x0d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18,
+	0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x45, 0x6e, 0x74, 0x72,
+	0x69, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x9b, 0x01, 0x0a, 0x0c,
+	0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x42, 0x0a, 0x0a,
+	0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+	0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75,
+	0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61,
+	0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x1a, 0x47, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a,
+	0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
+	0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+	0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
+	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbd, 0x01, 0x0a, 0x13, 0x50, 0x72,
+	0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72,
+	0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+	0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72,
+	0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46,
+	0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+	0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x5b, 0x0a, 0x09,
+	0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70,
+	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a,
+	0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
+	0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+	0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
+	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa8, 0x01, 0x0a, 0x0a, 0x42, 0x61,
+	0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c,
+	0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03,
+	0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e,
+	0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+	0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64,
+	0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x48, 0x0a, 0x0e, 0x46, 0x69,
+	0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05,
+	0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65,
+	0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49,
+	0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69,
+	0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+	0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61,
+	0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e,
+	0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
+	0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+	0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72,
+	0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72,
+	0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65,
+	0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73,
+	0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70,
+	0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+	0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c,
+	0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a,
+	0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
+	0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63,
+	0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a,
+	0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c,
+	0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42,
+	0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70,
+	0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50,
+	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
+	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70,
+	0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72,
+	0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_rpc_error_details_proto_rawDescOnce sync.Once
+	file_google_rpc_error_details_proto_rawDescData = file_google_rpc_error_details_proto_rawDesc
+)
+
+func file_google_rpc_error_details_proto_rawDescGZIP() []byte {
+	file_google_rpc_error_details_proto_rawDescOnce.Do(func() {
+		file_google_rpc_error_details_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_error_details_proto_rawDescData)
+	})
+	return file_google_rpc_error_details_proto_rawDescData
+}
+
+var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
+var file_google_rpc_error_details_proto_goTypes = []interface{}{
+	(*ErrorInfo)(nil),                     // 0: google.rpc.ErrorInfo
+	(*RetryInfo)(nil),                     // 1: google.rpc.RetryInfo
+	(*DebugInfo)(nil),                     // 2: google.rpc.DebugInfo
+	(*QuotaFailure)(nil),                  // 3: google.rpc.QuotaFailure
+	(*PreconditionFailure)(nil),           // 4: google.rpc.PreconditionFailure
+	(*BadRequest)(nil),                    // 5: google.rpc.BadRequest
+	(*RequestInfo)(nil),                   // 6: google.rpc.RequestInfo
+	(*ResourceInfo)(nil),                  // 7: google.rpc.ResourceInfo
+	(*Help)(nil),                          // 8: google.rpc.Help
+	(*LocalizedMessage)(nil),              // 9: google.rpc.LocalizedMessage
+	nil,                                   // 10: google.rpc.ErrorInfo.MetadataEntry
+	(*QuotaFailure_Violation)(nil),        // 11: google.rpc.QuotaFailure.Violation
+	(*PreconditionFailure_Violation)(nil), // 12: google.rpc.PreconditionFailure.Violation
+	(*BadRequest_FieldViolation)(nil),     // 13: google.rpc.BadRequest.FieldViolation
+	(*Help_Link)(nil),                     // 14: google.rpc.Help.Link
+	(*durationpb.Duration)(nil),           // 15: google.protobuf.Duration
+}
+var file_google_rpc_error_details_proto_depIdxs = []int32{
+	10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry
+	15, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration
+	11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation
+	12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation
+	13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation
+	14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link
+	6,  // [6:6] is the sub-list for method output_type
+	6,  // [6:6] is the sub-list for method input_type
+	6,  // [6:6] is the sub-list for extension type_name
+	6,  // [6:6] is the sub-list for extension extendee
+	0,  // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_google_rpc_error_details_proto_init() }
+func file_google_rpc_error_details_proto_init() {
+	if File_google_rpc_error_details_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_rpc_error_details_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ErrorInfo); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*RetryInfo); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*DebugInfo); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*QuotaFailure); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*PreconditionFailure); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*BadRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*RequestInfo); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ResourceInfo); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Help); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*LocalizedMessage); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*QuotaFailure_Violation); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*PreconditionFailure_Violation); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*BadRequest_FieldViolation); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Help_Link); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_rpc_error_details_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   15,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_rpc_error_details_proto_goTypes,
+		DependencyIndexes: file_google_rpc_error_details_proto_depIdxs,
+		MessageInfos:      file_google_rpc_error_details_proto_msgTypes,
+	}.Build()
+	File_google_rpc_error_details_proto = out.File
+	file_google_rpc_error_details_proto_rawDesc = nil
+	file_google_rpc_error_details_proto_goTypes = nil
+	file_google_rpc_error_details_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/grpc/encoding/gzip/gzip.go b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go
new file mode 100644
index 0000000000000000000000000000000000000000..6306e8bb0f0add210c6543633a285cb5f0a4d88b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go
@@ -0,0 +1,132 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package gzip implements and registers the gzip compressor
+// during initialization.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package gzip
+
+import (
+	"compress/gzip"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"sync"
+
+	"google.golang.org/grpc/encoding"
+)
+
+// Name is the name registered for the gzip compressor.
+const Name = "gzip"
+
+func init() {
+	c := &compressor{}
+	c.poolCompressor.New = func() any {
+		return &writer{Writer: gzip.NewWriter(io.Discard), pool: &c.poolCompressor}
+	}
+	encoding.RegisterCompressor(c)
+}
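+
+// Importing this package for its side effect registers the compressor; a
+// client then opts in per call. Editorial sketch; client.SomeMethod is a
+// placeholder for any generated stub method.
+//
+//	import (
+//		"google.golang.org/grpc"
+//		"google.golang.org/grpc/encoding/gzip" // registers the compressor in init
+//	)
+//
+//	resp, err := client.SomeMethod(ctx, req, grpc.UseCompressor(gzip.Name))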
+
+type writer struct {
+	*gzip.Writer
+	pool *sync.Pool
+}
+
+// SetLevel updates the registered gzip compressor to use the compression level specified (gzip.HuffmanOnly is not supported).
+// NOTE: this function must only be called during initialization time (i.e. in an init() function),
+// and is not thread-safe.
+//
+// The error returned will be nil if the specified level is valid.
+func SetLevel(level int) error {
+	if level < gzip.DefaultCompression || level > gzip.BestCompression {
+		return fmt.Errorf("grpc: invalid gzip compression level: %d", level)
+	}
+	c := encoding.GetCompressor(Name).(*compressor)
+	c.poolCompressor.New = func() any {
+		w, err := gzip.NewWriterLevel(io.Discard, level)
+		if err != nil {
+			panic(err)
+		}
+		return &writer{Writer: w, pool: &c.poolCompressor}
+	}
+	return nil
+}
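+
+// Editorial sketch: adjusting the level from an init function, per the note
+// above. stdgzip aliases "compress/gzip" and grpcgzip aliases this package.
+//
+//	import (
+//		stdgzip "compress/gzip"
+//		grpcgzip "google.golang.org/grpc/encoding/gzip"
+//	)
+//
+//	func init() {
+//		if err := grpcgzip.SetLevel(stdgzip.BestSpeed); err != nil {
+//			panic(err)
+//		}
+//	}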
+
+func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) {
+	z := c.poolCompressor.Get().(*writer)
+	z.Writer.Reset(w)
+	return z, nil
+}
+
+func (z *writer) Close() error {
+	defer z.pool.Put(z)
+	return z.Writer.Close()
+}
+
+type reader struct {
+	*gzip.Reader
+	pool *sync.Pool
+}
+
+func (c *compressor) Decompress(r io.Reader) (io.Reader, error) {
+	z, inPool := c.poolDecompressor.Get().(*reader)
+	if !inPool {
+		newZ, err := gzip.NewReader(r)
+		if err != nil {
+			return nil, err
+		}
+		return &reader{Reader: newZ, pool: &c.poolDecompressor}, nil
+	}
+	if err := z.Reset(r); err != nil {
+		c.poolDecompressor.Put(z)
+		return nil, err
+	}
+	return z, nil
+}
+
+func (z *reader) Read(p []byte) (n int, err error) {
+	n, err = z.Reader.Read(p)
+	if err == io.EOF {
+		z.pool.Put(z)
+	}
+	return n, err
+}
+
+// RFC1952 specifies that the last four bytes "contains the size of
+// the original (uncompressed) input data modulo 2^32."
+// gRPC has a max message size of 2GB so we don't need to worry about wraparound.
+func (c *compressor) DecompressedSize(buf []byte) int {
+	last := len(buf)
+	if last < 4 {
+		return -1
+	}
+	return int(binary.LittleEndian.Uint32(buf[last-4 : last]))
+}
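+
+// For example, a gzip stream whose uncompressed payload was 300 bytes
+// (0x0000012C) ends in the trailer bytes 0x2C 0x01 0x00 0x00, which
+// binary.LittleEndian.Uint32 reads back as 300.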
+
+func (c *compressor) Name() string {
+	return Name
+}
+
+type compressor struct {
+	poolCompressor   sync.Pool
+	poolDecompressor sync.Pool
+}
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..24299efd63f7a71f1b0980c16ef6d0c5e42cd217
--- /dev/null
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
@@ -0,0 +1,308 @@
+// Copyright 2015 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The canonical version of this proto can be found at
+// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.31.0
+// 	protoc        v4.22.0
+// source: grpc/health/v1/health.proto
+
+package grpc_health_v1
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type HealthCheckResponse_ServingStatus int32
+
+const (
+	HealthCheckResponse_UNKNOWN         HealthCheckResponse_ServingStatus = 0
+	HealthCheckResponse_SERVING         HealthCheckResponse_ServingStatus = 1
+	HealthCheckResponse_NOT_SERVING     HealthCheckResponse_ServingStatus = 2
+	HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3 // Used only by the Watch method.
+)
+
+// Enum value maps for HealthCheckResponse_ServingStatus.
+var (
+	HealthCheckResponse_ServingStatus_name = map[int32]string{
+		0: "UNKNOWN",
+		1: "SERVING",
+		2: "NOT_SERVING",
+		3: "SERVICE_UNKNOWN",
+	}
+	HealthCheckResponse_ServingStatus_value = map[string]int32{
+		"UNKNOWN":         0,
+		"SERVING":         1,
+		"NOT_SERVING":     2,
+		"SERVICE_UNKNOWN": 3,
+	}
+)
+
+func (x HealthCheckResponse_ServingStatus) Enum() *HealthCheckResponse_ServingStatus {
+	p := new(HealthCheckResponse_ServingStatus)
+	*p = x
+	return p
+}
+
+func (x HealthCheckResponse_ServingStatus) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HealthCheckResponse_ServingStatus) Descriptor() protoreflect.EnumDescriptor {
+	return file_grpc_health_v1_health_proto_enumTypes[0].Descriptor()
+}
+
+func (HealthCheckResponse_ServingStatus) Type() protoreflect.EnumType {
+	return &file_grpc_health_v1_health_proto_enumTypes[0]
+}
+
+func (x HealthCheckResponse_ServingStatus) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use HealthCheckResponse_ServingStatus.Descriptor instead.
+func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
+	return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1, 0}
+}
+
+type HealthCheckRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
+}
+
+func (x *HealthCheckRequest) Reset() {
+	*x = HealthCheckRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_health_v1_health_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *HealthCheckRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheckRequest) ProtoMessage() {}
+
+func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_health_v1_health_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheckRequest.ProtoReflect.Descriptor instead.
+func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
+	return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *HealthCheckRequest) GetService() string {
+	if x != nil {
+		return x.Service
+	}
+	return ""
+}
+
+type HealthCheckResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
+}
+
+func (x *HealthCheckResponse) Reset() {
+	*x = HealthCheckResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_health_v1_health_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *HealthCheckResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheckResponse) ProtoMessage() {}
+
+func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_health_v1_health_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheckResponse.ProtoReflect.Descriptor instead.
+func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
+	return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus {
+	if x != nil {
+		return x.Status
+	}
+	return HealthCheckResponse_UNKNOWN
+}
+
+var File_grpc_health_v1_health_proto protoreflect.FileDescriptor
+
+var file_grpc_health_v1_health_proto_rawDesc = []byte{
+	0x0a, 0x1b, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x76, 0x31,
+	0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67,
+	0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x22, 0x2e, 0x0a,
+	0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75,
+	0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0xb1, 0x01,
+	0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73,
+	0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61,
+	0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
+	0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
+	0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+	0x22, 0x4f, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75,
+	0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b,
+	0x0a, 0x07, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4e,
+	0x4f, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f,
+	0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
+	0x03, 0x32, 0xae, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05,
+	0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61,
+	0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
+	0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63,
+	0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74,
+	0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52,
+	0x0a, 0x05, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68,
+	0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
+	0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72,
+	0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61,
+	0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+	0x30, 0x01, 0x42, 0x61, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65,
+	0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50,
+	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
+	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68,
+	0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74,
+	0x68, 0x5f, 0x76, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c,
+	0x74, 0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_grpc_health_v1_health_proto_rawDescOnce sync.Once
+	file_grpc_health_v1_health_proto_rawDescData = file_grpc_health_v1_health_proto_rawDesc
+)
+
+func file_grpc_health_v1_health_proto_rawDescGZIP() []byte {
+	file_grpc_health_v1_health_proto_rawDescOnce.Do(func() {
+		file_grpc_health_v1_health_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_health_v1_health_proto_rawDescData)
+	})
+	return file_grpc_health_v1_health_proto_rawDescData
+}
+
+var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_grpc_health_v1_health_proto_goTypes = []interface{}{
+	(HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus
+	(*HealthCheckRequest)(nil),             // 1: grpc.health.v1.HealthCheckRequest
+	(*HealthCheckResponse)(nil),            // 2: grpc.health.v1.HealthCheckResponse
+}
+var file_grpc_health_v1_health_proto_depIdxs = []int32{
+	0, // 0: grpc.health.v1.HealthCheckResponse.status:type_name -> grpc.health.v1.HealthCheckResponse.ServingStatus
+	1, // 1: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest
+	1, // 2: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest
+	2, // 3: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse
+	2, // 4: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse
+	3, // [3:5] is the sub-list for method output_type
+	1, // [1:3] is the sub-list for method input_type
+	1, // [1:1] is the sub-list for extension type_name
+	1, // [1:1] is the sub-list for extension extendee
+	0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_grpc_health_v1_health_proto_init() }
+func file_grpc_health_v1_health_proto_init() {
+	if File_grpc_health_v1_health_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*HealthCheckRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*HealthCheckResponse); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_grpc_health_v1_health_proto_rawDesc,
+			NumEnums:      1,
+			NumMessages:   2,
+			NumExtensions: 0,
+			NumServices:   1,
+		},
+		GoTypes:           file_grpc_health_v1_health_proto_goTypes,
+		DependencyIndexes: file_grpc_health_v1_health_proto_depIdxs,
+		EnumInfos:         file_grpc_health_v1_health_proto_enumTypes,
+		MessageInfos:      file_grpc_health_v1_health_proto_msgTypes,
+	}.Build()
+	File_grpc_health_v1_health_proto = out.File
+	file_grpc_health_v1_health_proto_rawDesc = nil
+	file_grpc_health_v1_health_proto_goTypes = nil
+	file_grpc_health_v1_health_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..4439cda0f3cb7e57bc9a86620309c9a31e03ffe4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
@@ -0,0 +1,237 @@
+// Copyright 2015 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The canonical version of this proto can be found at
+// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto
+
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.3.0
+// - protoc             v4.22.0
+// source: grpc/health/v1/health.proto
+
+package grpc_health_v1
+
+import (
+	context "context"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+const (
+	Health_Check_FullMethodName = "/grpc.health.v1.Health/Check"
+	Health_Watch_FullMethodName = "/grpc.health.v1.Health/Watch"
+)
+
+// HealthClient is the client API for Health service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type HealthClient interface {
+	// Check gets the health of the specified service. If the requested service
+	// is unknown, the call will fail with status NOT_FOUND. If the caller does
+	// not specify a service name, the server should respond with its overall
+	// health status.
+	//
+	// Clients should set a deadline when calling Check, and can declare the
+	// server unhealthy if they do not receive a timely response.
+	//
+	// Check implementations should be idempotent and side effect free.
+	Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
+	// Performs a watch for the serving status of the requested service.
+	// The server will immediately send back a message indicating the current
+	// serving status.  It will then subsequently send a new message whenever
+	// the service's serving status changes.
+	//
+	// If the requested service is unknown when the call is received, the
+	// server will send a message setting the serving status to
+	// SERVICE_UNKNOWN but will *not* terminate the call.  If at some
+	// future point, the serving status of the service becomes known, the
+	// server will send a new message with the service's serving status.
+	//
+	// If the call terminates with status UNIMPLEMENTED, then clients
+	// should assume this method is not supported and should not retry the
+	// call.  If the call terminates with any other status (including OK),
+	// clients should retry the call with appropriate exponential backoff.
+	Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error)
+}
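+
+// As a usage sketch (not part of the generated API): assuming an existing
+// *grpc.ClientConn named conn, a caller might perform a single health check
+// with a deadline, as the Check documentation above recommends:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+//	defer cancel()
+//	client := grpc_health_v1.NewHealthClient(conn)
+//	resp, err := client.Check(ctx, &grpc_health_v1.HealthCheckRequest{Service: "my.service"})
+//	switch {
+//	case err != nil:
+//		log.Printf("health check failed (treat as unhealthy): %v", err)
+//	case resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING:
+//		log.Printf("service not serving: %v", resp.GetStatus())
+//	}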
+
+type healthClient struct {
+	cc grpc.ClientConnInterface
+}
+
+func NewHealthClient(cc grpc.ClientConnInterface) HealthClient {
+	return &healthClient{cc}
+}
+
+func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
+	out := new(HealthCheckResponse)
+	err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) {
+	stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &healthWatchClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type Health_WatchClient interface {
+	Recv() (*HealthCheckResponse, error)
+	grpc.ClientStream
+}
+
+type healthWatchClient struct {
+	grpc.ClientStream
+}
+
+func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) {
+	m := new(HealthCheckResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// HealthServer is the server API for Health service.
+// All implementations should embed UnimplementedHealthServer
+// for forward compatibility
+type HealthServer interface {
+	// Check gets the health of the specified service. If the requested service
+	// is unknown, the call will fail with status NOT_FOUND. If the caller does
+	// not specify a service name, the server should respond with its overall
+	// health status.
+	//
+	// Clients should set a deadline when calling Check, and can declare the
+	// server unhealthy if they do not receive a timely response.
+	//
+	// Check implementations should be idempotent and side effect free.
+	Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
+	// Performs a watch for the serving status of the requested service.
+	// The server will immediately send back a message indicating the current
+	// serving status.  It will then subsequently send a new message whenever
+	// the service's serving status changes.
+	//
+	// If the requested service is unknown when the call is received, the
+	// server will send a message setting the serving status to
+	// SERVICE_UNKNOWN but will *not* terminate the call.  If at some
+	// future point, the serving status of the service becomes known, the
+	// server will send a new message with the service's serving status.
+	//
+	// If the call terminates with status UNIMPLEMENTED, then clients
+	// should assume this method is not supported and should not retry the
+	// call.  If the call terminates with any other status (including OK),
+	// clients should retry the call with appropriate exponential backoff.
+	Watch(*HealthCheckRequest, Health_WatchServer) error
+}
+
+// UnimplementedHealthServer should be embedded to have forward compatible implementations.
+type UnimplementedHealthServer struct {
+}
+
+func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Check not implemented")
+}
+func (UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error {
+	return status.Errorf(codes.Unimplemented, "method Watch not implemented")
+}
+
+// UnsafeHealthServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as methods added to HealthServer will
+// result in compilation errors.
+type UnsafeHealthServer interface {
+	mustEmbedUnimplementedHealthServer()
+}
+
+func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) {
+	s.RegisterService(&Health_ServiceDesc, srv)
+}
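+
+// A minimal server-side sketch (assuming an existing *grpc.Server named srv):
+// embedding UnimplementedHealthServer supplies a Watch that returns
+// Unimplemented, so only Check needs to be written:
+//
+//	type alwaysServing struct {
+//		grpc_health_v1.UnimplementedHealthServer
+//	}
+//
+//	func (alwaysServing) Check(ctx context.Context, in *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) {
+//		return &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}, nil
+//	}
+//
+//	grpc_health_v1.RegisterHealthServer(srv, alwaysServing{})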
+
+func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(HealthCheckRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(HealthServer).Check(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: Health_Check_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(HealthCheckRequest)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(HealthServer).Watch(m, &healthWatchServer{stream})
+}
+
+type Health_WatchServer interface {
+	Send(*HealthCheckResponse) error
+	grpc.ServerStream
+}
+
+type healthWatchServer struct {
+	grpc.ServerStream
+}
+
+func (x *healthWatchServer) Send(m *HealthCheckResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+// Health_ServiceDesc is the grpc.ServiceDesc for the Health service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy).
+var Health_ServiceDesc = grpc.ServiceDesc{
+	ServiceName: "grpc.health.v1.Health",
+	HandlerType: (*HealthServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Check",
+			Handler:    _Health_Check_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "Watch",
+			Handler:       _Health_Watch_Handler,
+			ServerStreams: true,
+		},
+	},
+	Metadata: "grpc/health/v1/health.proto",
+}
diff --git a/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go
new file mode 100644
index 0000000000000000000000000000000000000000..2ef36bbcf92ac2166808590eb355fe7966198f1f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go
@@ -0,0 +1,160 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protodelim marshals and unmarshals varint size-delimited messages.
+package protodelim
+
+import (
+	"bufio"
+	"encoding/binary"
+	"fmt"
+	"io"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/internal/errors"
+	"google.golang.org/protobuf/proto"
+)
+
+// MarshalOptions is a configurable varint size-delimited marshaler.
+type MarshalOptions struct{ proto.MarshalOptions }
+
+// MarshalTo writes a varint size-delimited wire-format message to w.
+// If w returns an error, MarshalTo returns it unchanged.
+func (o MarshalOptions) MarshalTo(w io.Writer, m proto.Message) (int, error) {
+	msgBytes, err := o.MarshalOptions.Marshal(m)
+	if err != nil {
+		return 0, err
+	}
+
+	sizeBytes := protowire.AppendVarint(nil, uint64(len(msgBytes)))
+	sizeWritten, err := w.Write(sizeBytes)
+	if err != nil {
+		return sizeWritten, err
+	}
+	msgWritten, err := w.Write(msgBytes)
+	if err != nil {
+		return sizeWritten + msgWritten, err
+	}
+	return sizeWritten + msgWritten, nil
+}
+
+// MarshalTo writes a varint size-delimited wire-format message to w
+// with the default options.
+//
+// See the documentation for [MarshalOptions.MarshalTo].
+func MarshalTo(w io.Writer, m proto.Message) (int, error) {
+	return MarshalOptions{}.MarshalTo(w, m)
+}
+
+// UnmarshalOptions is a configurable varint size-delimited unmarshaler.
+type UnmarshalOptions struct {
+	proto.UnmarshalOptions
+
+	// MaxSize is the maximum size in wire-format bytes of a single message.
+	// Unmarshaling a message larger than MaxSize will return an error.
+	// A zero MaxSize will default to 4 MiB.
+	// Setting MaxSize to -1 disables the limit.
+	MaxSize int64
+}
+
+const defaultMaxSize = 4 << 20 // 4 MiB, corresponds to the default gRPC max request/response size
+
+// SizeTooLargeError is an error that is returned when the unmarshaler encounters a message size
+// that is larger than its configured [UnmarshalOptions.MaxSize].
+type SizeTooLargeError struct {
+	// Size is the varint size of the message encountered
+	// that was larger than the provided MaxSize.
+	Size uint64
+
+	// MaxSize is the MaxSize limit configured in UnmarshalOptions, which Size exceeded.
+	MaxSize uint64
+}
+
+func (e *SizeTooLargeError) Error() string {
+	return fmt.Sprintf("message size %d exceeded unmarshaler's maximum configured size %d", e.Size, e.MaxSize)
+}
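+
+// Because UnmarshalFrom returns this error wrapped, callers can detect it
+// with errors.As; a sketch (r and msg are assumed to exist):
+//
+//	opts := protodelim.UnmarshalOptions{MaxSize: 1 << 20} // cap messages at 1 MiB
+//	var tooLarge *protodelim.SizeTooLargeError
+//	if err := opts.UnmarshalFrom(r, msg); errors.As(err, &tooLarge) {
+//		log.Printf("skipping %d-byte message (limit %d)", tooLarge.Size, tooLarge.MaxSize)
+//	}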
+
+// Reader is the interface expected by [UnmarshalFrom].
+// It is implemented by *[bufio.Reader].
+type Reader interface {
+	io.Reader
+	io.ByteReader
+}
+
+// UnmarshalFrom parses and consumes a varint size-delimited wire-format message
+// from r.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
+//
+// The error is [io.EOF] only if no bytes are read.
+// If an EOF happens after reading some but not all the bytes,
+// UnmarshalFrom returns a non-io.EOF error.
+// In particular if r returns a non-io.EOF error, UnmarshalFrom returns it unchanged,
+// and if only a size is read with no subsequent message, [io.ErrUnexpectedEOF] is returned.
+func (o UnmarshalOptions) UnmarshalFrom(r Reader, m proto.Message) error {
+	var sizeArr [binary.MaxVarintLen64]byte
+	sizeBuf := sizeArr[:0]
+	for i := range sizeArr {
+		b, err := r.ReadByte()
+		if err != nil {
+			// An immediate EOF (no size bytes read) is returned as io.EOF.
+			// An EOF after at least one size byte means the varint was
+			// truncated; break so the parse below reports the error.
+			if err == io.EOF && i != 0 {
+				break
+			}
+			return err
+		}
+		sizeBuf = append(sizeBuf, b)
+		if b < 0x80 {
+			break
+		}
+	}
+	size, n := protowire.ConsumeVarint(sizeBuf)
+	if n < 0 {
+		return protowire.ParseError(n)
+	}
+
+	maxSize := o.MaxSize
+	if maxSize == 0 {
+		maxSize = defaultMaxSize
+	}
+	if maxSize != -1 && size > uint64(maxSize) {
+		return errors.Wrap(&SizeTooLargeError{Size: size, MaxSize: uint64(maxSize)}, "")
+	}
+
+	var b []byte
+	var err error
+	if br, ok := r.(*bufio.Reader); ok {
+		// Use the []byte from the bufio.Reader instead of having to allocate one.
+		// This reduces CPU usage and allocated bytes.
+		b, err = br.Peek(int(size))
+		if err == nil {
+			defer br.Discard(int(size))
+		} else {
+			b = nil
+		}
+	}
+	if b == nil {
+		b = make([]byte, size)
+		_, err = io.ReadFull(r, b)
+	}
+
+	if err == io.EOF {
+		return io.ErrUnexpectedEOF
+	}
+	if err != nil {
+		return err
+	}
+	if err := o.Unmarshal(b, m); err != nil {
+		return err
+	}
+	return nil
+}
+
+// UnmarshalFrom parses and consumes a varint size-delimited wire-format message
+// from r with the default options.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
+//
+// See the documentation for [UnmarshalOptions.UnmarshalFrom].
+func UnmarshalFrom(r Reader, m proto.Message) error {
+	return UnmarshalOptions{}.UnmarshalFrom(r, m)
+}
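+
+// A small round-trip sketch (msg is an assumed proto.Message value; bytes
+// and bufio come from the standard library):
+//
+//	var buf bytes.Buffer
+//	if _, err := protodelim.MarshalTo(&buf, msg); err != nil {
+//		// handle error
+//	}
+//	out := msg.ProtoReflect().New().Interface() // fresh message of the same type
+//	if err := protodelim.UnmarshalFrom(bufio.NewReader(&buf), out); err != nil {
+//		// handle error
+//	}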
diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..e8789cb331eb8707ae9d464a58dfa50320228ab9
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
@@ -0,0 +1,588 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/field_mask.proto
+
+// Package fieldmaskpb contains generated types for google/protobuf/field_mask.proto.
+//
+// The FieldMask message represents a set of symbolic field paths.
+// The paths are specific to some target message type,
+// which is not stored within the FieldMask message itself.
+//
+// # Constructing a FieldMask
+//
+// The New function is used to construct a FieldMask:
+//
+//	var messageType *descriptorpb.DescriptorProto
+//	fm, err := fieldmaskpb.New(messageType, "field.name", "field.number")
+//	if err != nil {
+//		... // handle error
+//	}
+//	... // make use of fm
+//
+// The "field.name" and "field.number" paths are valid paths according to the
+// google.protobuf.DescriptorProto message. Use of a path that does not correlate
+// to valid fields reachable from DescriptorProto would result in an error.
+//
+// Once a FieldMask message has been constructed,
+// the Append method can be used to insert additional paths to the path set:
+//
+//	var messageType *descriptorpb.DescriptorProto
+//	if err := fm.Append(messageType, "options"); err != nil {
+//		... // handle error
+//	}
+//
+// # Type checking a FieldMask
+//
+// In order to verify that a FieldMask represents a set of fields that are
+// reachable from some target message type, use the IsValid method:
+//
+//	var messageType *descriptorpb.DescriptorProto
+//	if fm.IsValid(messageType) {
+//		... // make use of fm
+//	}
+//
+// IsValid needs to be passed the target message type as an input since the
+// FieldMask message itself does not store the message type that the set of paths
+// are for.
+package fieldmaskpb
+
+import (
+	proto "google.golang.org/protobuf/proto"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sort "sort"
+	strings "strings"
+	sync "sync"
+)
+
+// `FieldMask` represents a set of symbolic field paths, for example:
+//
+//	paths: "f.a"
+//	paths: "f.b.d"
+//
+// Here `f` represents a field in some root message, `a` and `b`
+// fields in the message found in `f`, and `d` a field found in the
+// message in `f.b`.
+//
+// Field masks are used to specify a subset of fields that should be
+// returned by a get operation or modified by an update operation.
+// Field masks also have a custom JSON encoding (see below).
+//
+// # Field Masks in Projections
+//
+// When used in the context of a projection, a response message or
+// sub-message is filtered by the API to only contain those fields as
+// specified in the mask. For example, if the mask in the previous
+// example is applied to a response message as follows:
+//
+//	f {
+//	  a : 22
+//	  b {
+//	    d : 1
+//	    x : 2
+//	  }
+//	  y : 13
+//	}
+//	z: 8
+//
+// The result will not contain specific values for fields x, y, and z
+// (their value will be set to the default, and omitted in proto text
+// output):
+//
+//	f {
+//	  a : 22
+//	  b {
+//	    d : 1
+//	  }
+//	}
+//
+// A repeated field is not allowed except at the last position of a
+// paths string.
+//
+// If a FieldMask object is not present in a get operation, the
+// operation applies to all fields (as if a FieldMask of all fields
+// had been specified).
+//
+// Note that a field mask does not necessarily apply to the
+// top-level response message. In case of a REST get operation, the
+// field mask applies directly to the response, but in case of a REST
+// list operation, the mask instead applies to each individual message
+// in the returned resource list. In case of a REST custom method,
+// other definitions may be used. Where the mask applies will be
+// clearly documented together with its declaration in the API.  In
+// any case, the effect on the returned resource/resources is required
+// behavior for APIs.
+//
+// # Field Masks in Update Operations
+//
+// A field mask in update operations specifies which fields of the
+// targeted resource are going to be updated. The API is required
+// to only change the values of the fields as specified in the mask
+// and leave the others untouched. If a resource is passed in to
+// describe the updated values, the API ignores the values of all
+// fields not covered by the mask.
+//
+// If a repeated field is specified for an update operation, new values will
+// be appended to the existing repeated field in the target resource. Note that
+// a repeated field is only allowed in the last position of a `paths` string.
+//
+// If a sub-message is specified in the last position of the field mask for an
+// update operation, then the new value will be merged into the existing sub-message
+// in the target resource.
+//
+// For example, given the target message:
+//
+//	f {
+//	  b {
+//	    d: 1
+//	    x: 2
+//	  }
+//	  c: [1]
+//	}
+//
+// And an update message:
+//
+//	f {
+//	  b {
+//	    d: 10
+//	  }
+//	  c: [2]
+//	}
+//
+// then if the field mask is:
+//
+//	paths: ["f.b", "f.c"]
+//
+// then the result will be:
+//
+//	f {
+//	  b {
+//	    d: 10
+//	    x: 2
+//	  }
+//	  c: [1, 2]
+//	}
+//
+// An implementation may provide options to override this default behavior for
+// repeated and message fields.
+//
+// In order to reset a field's value to the default, the field must
+// be in the mask and set to the default value in the provided resource.
+// Hence, in order to reset all fields of a resource, provide a default
+// instance of the resource and set all fields in the mask, or do
+// not provide a mask as described below.
+//
+// If a field mask is not present on update, the operation applies to
+// all fields (as if a field mask of all fields has been specified).
+// Note that in the presence of schema evolution, this may mean that
+// fields the client does not know and has therefore not filled into
+// the request will be reset to their default. If this is unwanted
+// behavior, a specific service may require a client to always specify
+// a field mask, producing an error if not.
+//
+// As with get operations, the location of the resource which
+// describes the updated values in the request message depends on the
+// operation kind. In any case, the effect of the field mask is
+// required to be honored by the API.
+//
+// ## Considerations for HTTP REST
+//
+// The HTTP kind of an update operation which uses a field mask must
+// be set to PATCH instead of PUT in order to satisfy HTTP semantics
+// (PUT must only be used for full updates).
+//
+// # JSON Encoding of Field Masks
+//
+// In JSON, a field mask is encoded as a single string where paths are
+// separated by a comma. Field names in each path are converted
+// to/from lower-camel naming conventions.
+//
+// As an example, consider the following message declarations:
+//
+//	message Profile {
+//	  User user = 1;
+//	  Photo photo = 2;
+//	}
+//	message User {
+//	  string display_name = 1;
+//	  string address = 2;
+//	}
+//
+// In proto, a field mask for `Profile` may look as follows:
+//
+//	mask {
+//	  paths: "user.display_name"
+//	  paths: "photo"
+//	}
+//
+// In JSON, the same mask is represented as below:
+//
+//	{
+//	  mask: "user.displayName,photo"
+//	}
+//
+// # Field Masks and Oneof Fields
+//
+// Field masks treat fields in oneofs just as regular fields. Consider the
+// following message:
+//
+//	message SampleMessage {
+//	  oneof test_oneof {
+//	    string name = 4;
+//	    SubMessage sub_message = 9;
+//	  }
+//	}
+//
+// The field mask can be:
+//
+//	mask {
+//	  paths: "name"
+//	}
+//
+// Or:
+//
+//	mask {
+//	  paths: "sub_message"
+//	}
+//
+// Note that oneof type names ("test_oneof" in this case) cannot be used in
+// paths.
+//
+// # Field Mask Verification
+//
+// The implementation of any API method which has a FieldMask type field in the
+// request should verify the included field paths, and return an
+// `INVALID_ARGUMENT` error if any path is unmappable.
+type FieldMask struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The set of field mask paths.
+	Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
+}
+
+// New constructs a field mask from a list of paths and verifies that
+// each one is valid according to the specified message type.
+func New(m proto.Message, paths ...string) (*FieldMask, error) {
+	x := new(FieldMask)
+	return x, x.Append(m, paths...)
+}
+
+// Union returns the union of all the paths in the input field masks.
+func Union(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {
+	var out []string
+	out = append(out, mx.GetPaths()...)
+	out = append(out, my.GetPaths()...)
+	for _, m := range ms {
+		out = append(out, m.GetPaths()...)
+	}
+	return &FieldMask{Paths: normalizePaths(out)}
+}
+
+// Intersect returns the intersection of all the paths in the input field masks.
+func Intersect(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {
+	var ss1, ss2 []string // reused buffers for performance
+	intersect := func(out, in []string) []string {
+		ss1 = normalizePaths(append(ss1[:0], in...))
+		ss2 = normalizePaths(append(ss2[:0], out...))
+		out = out[:0]
+		for i1, i2 := 0, 0; i1 < len(ss1) && i2 < len(ss2); {
+			switch s1, s2 := ss1[i1], ss2[i2]; {
+			case hasPathPrefix(s1, s2):
+				out = append(out, s1)
+				i1++
+			case hasPathPrefix(s2, s1):
+				out = append(out, s2)
+				i2++
+			case lessPath(s1, s2):
+				i1++
+			case lessPath(s2, s1):
+				i2++
+			}
+		}
+		return out
+	}
+
+	out := Union(mx, my, ms...).GetPaths()
+	out = intersect(out, mx.GetPaths())
+	out = intersect(out, my.GetPaths())
+	for _, m := range ms {
+		out = intersect(out, m.GetPaths())
+	}
+	return &FieldMask{Paths: normalizePaths(out)}
+}
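+
+// For example (a sketch; the resulting Paths are shown normalized):
+//
+//	mx := &fieldmaskpb.FieldMask{Paths: []string{"a.b", "c"}}
+//	my := &fieldmaskpb.FieldMask{Paths: []string{"a", "c.d"}}
+//	union := fieldmaskpb.Union(mx, my)     // Paths: ["a", "c"]
+//	inter := fieldmaskpb.Intersect(mx, my) // Paths: ["a.b", "c.d"]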
+
+// IsValid reports whether all the paths are syntactically valid and
+// refer to known fields in the specified message type.
+// It reports false for a nil FieldMask.
+func (x *FieldMask) IsValid(m proto.Message) bool {
+	paths := x.GetPaths()
+	return x != nil && numValidPaths(m, paths) == len(paths)
+}
+
+// Append appends a list of paths to the mask and verifies that each one
+// is valid according to the specified message type.
+// An invalid path is not appended and prevents insertion of all subsequent paths.
+func (x *FieldMask) Append(m proto.Message, paths ...string) error {
+	numValid := numValidPaths(m, paths)
+	x.Paths = append(x.Paths, paths[:numValid]...)
+	paths = paths[numValid:]
+	if len(paths) > 0 {
+		name := m.ProtoReflect().Descriptor().FullName()
+		return protoimpl.X.NewError("invalid path %q for message %q", paths[0], name)
+	}
+	return nil
+}
+
+func numValidPaths(m proto.Message, paths []string) int {
+	md0 := m.ProtoReflect().Descriptor()
+	for i, path := range paths {
+		md := md0
+		if !rangeFields(path, func(field string) bool {
+			// Search the field within the message.
+			if md == nil {
+				return false // not within a message
+			}
+			fd := md.Fields().ByName(protoreflect.Name(field))
+			// The real field name of a group is the message name.
+			if fd == nil {
+				gd := md.Fields().ByName(protoreflect.Name(strings.ToLower(field)))
+				if gd != nil && gd.Kind() == protoreflect.GroupKind && string(gd.Message().Name()) == field {
+					fd = gd
+				}
+			} else if fd.Kind() == protoreflect.GroupKind && string(fd.Message().Name()) != field {
+				fd = nil
+			}
+			if fd == nil {
+				return false // message does not have this field
+			}
+
+			// Identify the next message to search within.
+			md = fd.Message() // may be nil
+
+			// Repeated fields are only allowed at the last position.
+			if fd.IsList() || fd.IsMap() {
+				md = nil
+			}
+
+			return true
+		}) {
+			return i
+		}
+	}
+	return len(paths)
+}
+
+// Normalize converts the mask to its canonical form where all paths are sorted
+// and redundant paths are removed.
+func (x *FieldMask) Normalize() {
+	x.Paths = normalizePaths(x.Paths)
+}
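+
+// For example, a path that is covered by a shorter prefix path is elided
+// (a sketch):
+//
+//	m := &fieldmaskpb.FieldMask{Paths: []string{"a.b.c", "a.b", "d"}}
+//	m.Normalize() // m.Paths is now ["a.b", "d"]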
+
+func normalizePaths(paths []string) []string {
+	sort.Slice(paths, func(i, j int) bool {
+		return lessPath(paths[i], paths[j])
+	})
+
+	// Elide any path that is a prefix match on the previous.
+	out := paths[:0]
+	for _, path := range paths {
+		if len(out) > 0 && hasPathPrefix(path, out[len(out)-1]) {
+			continue
+		}
+		out = append(out, path)
+	}
+	return out
+}
+
+// hasPathPrefix is like strings.HasPrefix, but further checks for either
+// an exact match or that the prefix is delimited by a dot.
+func hasPathPrefix(path, prefix string) bool {
+	return strings.HasPrefix(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.')
+}
+
+// lessPath is a lexicographical comparison where dot is specially treated
+// as the smallest symbol.
+func lessPath(x, y string) bool {
+	for i := 0; i < len(x) && i < len(y); i++ {
+		if x[i] != y[i] {
+			return (x[i] - '.') < (y[i] - '.')
+		}
+	}
+	return len(x) < len(y)
+}
+
+// rangeFields is like strings.Split(path, "."), but avoids allocations by
+// iterating over each field in place and calling an iterator function.
+func rangeFields(path string, f func(field string) bool) bool {
+	for {
+		var field string
+		if i := strings.IndexByte(path, '.'); i >= 0 {
+			field, path = path[:i], path[i:]
+		} else {
+			field, path = path, ""
+		}
+
+		if !f(field) {
+			return false
+		}
+
+		if len(path) == 0 {
+			return true
+		}
+		path = strings.TrimPrefix(path, ".")
+	}
+}
+
+func (x *FieldMask) Reset() {
+	*x = FieldMask{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *FieldMask) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldMask) ProtoMessage() {}
+
+func (x *FieldMask) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldMask.ProtoReflect.Descriptor instead.
+func (*FieldMask) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_field_mask_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *FieldMask) GetPaths() []string {
+	if x != nil {
+		return x.Paths
+	}
+	return nil
+}
+
+var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_field_mask_proto_rawDesc = []byte{
+	0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b,
+	0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
+	0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e,
+	0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+	0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
+	0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70,
+	0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61,
+	0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e,
+	0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+	0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_protobuf_field_mask_proto_rawDescOnce sync.Once
+	file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc
+)
+
+func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte {
+	file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() {
+		file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData)
+	})
+	return file_google_protobuf_field_mask_proto_rawDescData
+}
+
+var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_protobuf_field_mask_proto_goTypes = []interface{}{
+	(*FieldMask)(nil), // 0: google.protobuf.FieldMask
+}
+var file_google_protobuf_field_mask_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_field_mask_proto_init() }
+func file_google_protobuf_field_mask_proto_init() {
+	if File_google_protobuf_field_mask_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*FieldMask); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   1,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_protobuf_field_mask_proto_goTypes,
+		DependencyIndexes: file_google_protobuf_field_mask_proto_depIdxs,
+		MessageInfos:      file_google_protobuf_field_mask_proto_msgTypes,
+	}.Build()
+	File_google_protobuf_field_mask_proto = out.File
+	file_google_protobuf_field_mask_proto_rawDesc = nil
+	file_google_protobuf_field_mask_proto_goTypes = nil
+	file_google_protobuf_field_mask_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..d2bac8b88ea1ea15cbf8ba142f7da358d667b4d0
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
@@ -0,0 +1,810 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/struct.proto
+
+// Package structpb contains generated types for google/protobuf/struct.proto.
+//
+// The messages (i.e., Value, Struct, and ListValue) defined in struct.proto are
+// used to represent arbitrary JSON. The Value message represents a JSON value,
+// the Struct message represents a JSON object, and the ListValue message
+// represents a JSON array. See https://json.org for more information.
+//
+// The Value, Struct, and ListValue types have generated MarshalJSON and
+// UnmarshalJSON methods such that they serialize JSON equivalent to what the
+// messages themselves represent. Use of these types with the
+// "google.golang.org/protobuf/encoding/protojson" package
+// ensures that they will be serialized as their JSON equivalent.
+//
+// # Conversion to and from a Go interface
+//
+// The standard Go "encoding/json" package has functionality to serialize
+// arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and
+// ListValue.AsSlice methods can convert the protobuf message representation into
+// a form represented by interface{}, map[string]interface{}, and []interface{}.
+// This form can be used with other packages that operate on such data structures
+// and also directly with the standard json package.
+//
+// In order to convert the interface{}, map[string]interface{}, and []interface{}
+// forms back into Value, Struct, and ListValue messages, use the NewStruct,
+// NewList, and NewValue constructor functions.
+//
+// # Example usage
+//
+// Consider the following example JSON object:
+//
+//	{
+//		"firstName": "John",
+//		"lastName": "Smith",
+//		"isAlive": true,
+//		"age": 27,
+//		"address": {
+//			"streetAddress": "21 2nd Street",
+//			"city": "New York",
+//			"state": "NY",
+//			"postalCode": "10021-3100"
+//		},
+//		"phoneNumbers": [
+//			{
+//				"type": "home",
+//				"number": "212 555-1234"
+//			},
+//			{
+//				"type": "office",
+//				"number": "646 555-4567"
+//			}
+//		],
+//		"children": [],
+//		"spouse": null
+//	}
+//
+// To construct a Value message representing the above JSON object:
+//
+//	m, err := structpb.NewValue(map[string]interface{}{
+//		"firstName": "John",
+//		"lastName":  "Smith",
+//		"isAlive":   true,
+//		"age":       27,
+//		"address": map[string]interface{}{
+//			"streetAddress": "21 2nd Street",
+//			"city":          "New York",
+//			"state":         "NY",
+//			"postalCode":    "10021-3100",
+//		},
+//		"phoneNumbers": []interface{}{
+//			map[string]interface{}{
+//				"type":   "home",
+//				"number": "212 555-1234",
+//			},
+//			map[string]interface{}{
+//				"type":   "office",
+//				"number": "646 555-4567",
+//			},
+//		},
+//		"children": []interface{}{},
+//		"spouse":   nil,
+//	})
+//	if err != nil {
+//		... // handle error
+//	}
+//	... // make use of m as a *structpb.Value
+package structpb
+
+import (
+	base64 "encoding/base64"
+	protojson "google.golang.org/protobuf/encoding/protojson"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	math "math"
+	reflect "reflect"
+	sync "sync"
+	utf8 "unicode/utf8"
+)
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+// The JSON representation for `NullValue` is JSON `null`.
+type NullValue int32
+
+const (
+	// Null value.
+	NullValue_NULL_VALUE NullValue = 0
+)
+
+// Enum value maps for NullValue.
+var (
+	NullValue_name = map[int32]string{
+		0: "NULL_VALUE",
+	}
+	NullValue_value = map[string]int32{
+		"NULL_VALUE": 0,
+	}
+)
+
+func (x NullValue) Enum() *NullValue {
+	p := new(NullValue)
+	*p = x
+	return p
+}
+
+func (x NullValue) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (NullValue) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_struct_proto_enumTypes[0].Descriptor()
+}
+
+func (NullValue) Type() protoreflect.EnumType {
+	return &file_google_protobuf_struct_proto_enumTypes[0]
+}
+
+func (x NullValue) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use NullValue.Descriptor instead.
+func (NullValue) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0}
+}
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS, a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is a JSON object.
+type Struct struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Unordered map of dynamically typed values.
+	Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+// NewStruct constructs a Struct from a general-purpose Go map.
+// The map keys must be valid UTF-8.
+// The map values are converted using NewValue.
+func NewStruct(v map[string]interface{}) (*Struct, error) {
+	x := &Struct{Fields: make(map[string]*Value, len(v))}
+	for k, v := range v {
+		if !utf8.ValidString(k) {
+			return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", k)
+		}
+		var err error
+		x.Fields[k], err = NewValue(v)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return x, nil
+}
+
+// AsMap converts x to a general-purpose Go map.
+// The map values are converted by calling Value.AsInterface.
+func (x *Struct) AsMap() map[string]interface{} {
+	f := x.GetFields()
+	vs := make(map[string]interface{}, len(f))
+	for k, v := range f {
+		vs[k] = v.AsInterface()
+	}
+	return vs
+}
+
+func (x *Struct) MarshalJSON() ([]byte, error) {
+	return protojson.Marshal(x)
+}
+
+func (x *Struct) UnmarshalJSON(b []byte) error {
+	return protojson.Unmarshal(b, x)
+}
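+
+// A JSON round-trip sketch (key order in the marshaled output should not be
+// relied upon):
+//
+//	s := &structpb.Struct{}
+//	if err := s.UnmarshalJSON([]byte(`{"name":"demo","count":2}`)); err != nil {
+//		// handle error
+//	}
+//	b, err := s.MarshalJSON() // e.g. {"count":2,"name":"demo"}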
+
+func (x *Struct) Reset() {
+	*x = Struct{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_struct_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Struct) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Struct) ProtoMessage() {}
+
+func (x *Struct) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_struct_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Struct.ProtoReflect.Descriptor instead.
+func (*Struct) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Struct) GetFields() map[string]*Value {
+	if x != nil {
+		return x.Fields
+	}
+	return nil
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of a value is expected to set one of these
+// variants. Absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is a JSON value.
+type Value struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The kind of value.
+	//
+	// Types that are assignable to Kind:
+	//
+	//	*Value_NullValue
+	//	*Value_NumberValue
+	//	*Value_StringValue
+	//	*Value_BoolValue
+	//	*Value_StructValue
+	//	*Value_ListValue
+	Kind isValue_Kind `protobuf_oneof:"kind"`
+}
+
+// NewValue constructs a Value from a general-purpose Go interface.
+//
+//	╔════════════════════════╤════════════════════════════════════════════╗
+//	║ Go type                │ Conversion                                 ║
+//	╠════════════════════════╪════════════════════════════════════════════╣
+//	║ nil                    │ stored as NullValue                        ║
+//	║ bool                   │ stored as BoolValue                        ║
+//	║ int, int32, int64      │ stored as NumberValue                      ║
+//	║ uint, uint32, uint64   │ stored as NumberValue                      ║
+//	║ float32, float64       │ stored as NumberValue                      ║
+//	║ string                 │ stored as StringValue; must be valid UTF-8 ║
+//	║ []byte                 │ stored as StringValue; base64-encoded      ║
+//	║ map[string]interface{} │ stored as StructValue                      ║
+//	║ []interface{}          │ stored as ListValue                        ║
+//	╚════════════════════════╧════════════════════════════════════════════╝
+//
+// When converting an int64 or uint64 to a NumberValue, numeric precision loss
+// is possible since they are stored as a float64.
+func NewValue(v interface{}) (*Value, error) {
+	switch v := v.(type) {
+	case nil:
+		return NewNullValue(), nil
+	case bool:
+		return NewBoolValue(v), nil
+	case int:
+		return NewNumberValue(float64(v)), nil
+	case int32:
+		return NewNumberValue(float64(v)), nil
+	case int64:
+		return NewNumberValue(float64(v)), nil
+	case uint:
+		return NewNumberValue(float64(v)), nil
+	case uint32:
+		return NewNumberValue(float64(v)), nil
+	case uint64:
+		return NewNumberValue(float64(v)), nil
+	case float32:
+		return NewNumberValue(float64(v)), nil
+	case float64:
+		return NewNumberValue(float64(v)), nil
+	case string:
+		if !utf8.ValidString(v) {
+			return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v)
+		}
+		return NewStringValue(v), nil
+	case []byte:
+		s := base64.StdEncoding.EncodeToString(v)
+		return NewStringValue(s), nil
+	case map[string]interface{}:
+		v2, err := NewStruct(v)
+		if err != nil {
+			return nil, err
+		}
+		return NewStructValue(v2), nil
+	case []interface{}:
+		v2, err := NewList(v)
+		if err != nil {
+			return nil, err
+		}
+		return NewListValue(v2), nil
+	default:
+		return nil, protoimpl.X.NewError("invalid type: %T", v)
+	}
+}
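+
+// A sketch of the table above in action; note the integer comes back as a
+// float64 after conversion:
+//
+//	v, err := structpb.NewValue(map[string]interface{}{"enabled": true, "retries": 3})
+//	if err != nil {
+//		// handle error
+//	}
+//	m := v.AsInterface().(map[string]interface{}) // {"enabled": true, "retries": float64(3)}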
+
+// NewNullValue constructs a new null Value.
+func NewNullValue() *Value {
+	return &Value{Kind: &Value_NullValue{NullValue: NullValue_NULL_VALUE}}
+}
+
+// NewBoolValue constructs a new boolean Value.
+func NewBoolValue(v bool) *Value {
+	return &Value{Kind: &Value_BoolValue{BoolValue: v}}
+}
+
+// NewNumberValue constructs a new number Value.
+func NewNumberValue(v float64) *Value {
+	return &Value{Kind: &Value_NumberValue{NumberValue: v}}
+}
+
+// NewStringValue constructs a new string Value.
+func NewStringValue(v string) *Value {
+	return &Value{Kind: &Value_StringValue{StringValue: v}}
+}
+
+// NewStructValue constructs a new struct Value.
+func NewStructValue(v *Struct) *Value {
+	return &Value{Kind: &Value_StructValue{StructValue: v}}
+}
+
+// NewListValue constructs a new list Value.
+func NewListValue(v *ListValue) *Value {
+	return &Value{Kind: &Value_ListValue{ListValue: v}}
+}
+
+// AsInterface converts x to a general-purpose Go interface.
+//
+// Calling Value.MarshalJSON and "encoding/json".Marshal on this output produce
+// semantically equivalent JSON (assuming no errors occur).
+//
+// Non-finite floating-point values (NaN, Infinity, and -Infinity) are
+// converted to strings to remain compatible with MarshalJSON.
+func (x *Value) AsInterface() interface{} {
+	switch v := x.GetKind().(type) {
+	case *Value_NumberValue:
+		if v != nil {
+			switch {
+			case math.IsNaN(v.NumberValue):
+				return "NaN"
+			case math.IsInf(v.NumberValue, +1):
+				return "Infinity"
+			case math.IsInf(v.NumberValue, -1):
+				return "-Infinity"
+			default:
+				return v.NumberValue
+			}
+		}
+	case *Value_StringValue:
+		if v != nil {
+			return v.StringValue
+		}
+	case *Value_BoolValue:
+		if v != nil {
+			return v.BoolValue
+		}
+	case *Value_StructValue:
+		if v != nil {
+			return v.StructValue.AsMap()
+		}
+	case *Value_ListValue:
+		if v != nil {
+			return v.ListValue.AsSlice()
+		}
+	}
+	return nil
+}
+
+func (x *Value) MarshalJSON() ([]byte, error) {
+	return protojson.Marshal(x)
+}
+
+func (x *Value) UnmarshalJSON(b []byte) error {
+	return protojson.Unmarshal(b, x)
+}
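A short sketch (again not part of the vendored file) of the behavior documented above: `AsInterface` returns non-finite numbers as strings, so that a later `encoding/json.Marshal` — which rejects NaN — still succeeds, while ordinary values round-trip through the `protojson`-backed JSON methods. Note that `protojson` output spacing is deliberately not guaranteed to be stable.

```go
package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// Non-finite numbers come back from AsInterface as strings.
	nan := structpb.NewNumberValue(math.NaN())
	fmt.Printf("%T %v\n", nan.AsInterface(), nan.AsInterface()) // string NaN

	// Ordinary values round-trip through the protojson-backed methods.
	var v structpb.Value
	if err := v.UnmarshalJSON([]byte(`{"a": [1, "two", null]}`)); err != nil {
		panic(err)
	}
	b, err := v.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // e.g. {"a":[1,"two",null]}; spacing may vary
}
```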
+
+func (x *Value) Reset() {
+	*x = Value{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_struct_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Value) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Value) ProtoMessage() {}
+
+func (x *Value) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_struct_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Value.ProtoReflect.Descriptor instead.
+func (*Value) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_struct_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *Value) GetKind() isValue_Kind {
+	if m != nil {
+		return m.Kind
+	}
+	return nil
+}
+
+func (x *Value) GetNullValue() NullValue {
+	if x, ok := x.GetKind().(*Value_NullValue); ok {
+		return x.NullValue
+	}
+	return NullValue_NULL_VALUE
+}
+
+func (x *Value) GetNumberValue() float64 {
+	if x, ok := x.GetKind().(*Value_NumberValue); ok {
+		return x.NumberValue
+	}
+	return 0
+}
+
+func (x *Value) GetStringValue() string {
+	if x, ok := x.GetKind().(*Value_StringValue); ok {
+		return x.StringValue
+	}
+	return ""
+}
+
+func (x *Value) GetBoolValue() bool {
+	if x, ok := x.GetKind().(*Value_BoolValue); ok {
+		return x.BoolValue
+	}
+	return false
+}
+
+func (x *Value) GetStructValue() *Struct {
+	if x, ok := x.GetKind().(*Value_StructValue); ok {
+		return x.StructValue
+	}
+	return nil
+}
+
+func (x *Value) GetListValue() *ListValue {
+	if x, ok := x.GetKind().(*Value_ListValue); ok {
+		return x.ListValue
+	}
+	return nil
+}
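As with all generated oneof accessors, each `Get*` method above returns its zero value when a different variant (or none) is set, so callers can probe without a type switch. A brief sketch, reusing the `structpb` import from the earlier example:

```go
v := structpb.NewStringValue("hello")
fmt.Println(v.GetNumberValue())        // 0 — number_value is not the active variant
fmt.Println(v.GetStructValue() == nil) // true — struct_value is not the active variant
fmt.Println(v.GetStringValue())        // hello
```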
+
+type isValue_Kind interface {
+	isValue_Kind()
+}
+
+type Value_NullValue struct {
+	// Represents a null value.
+	NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Value_NumberValue struct {
+	// Represents a double value.
+	NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"`
+}
+
+type Value_StringValue struct {
+	// Represents a string value.
+	StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Value_BoolValue struct {
+	// Represents a boolean value.
+	BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Value_StructValue struct {
+	// Represents a structured value.
+	StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"`
+}
+
+type Value_ListValue struct {
+	// Represents a repeated `Value`.
+	ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"`
+}
+
+func (*Value_NullValue) isValue_Kind() {}
+
+func (*Value_NumberValue) isValue_Kind() {}
+
+func (*Value_StringValue) isValue_Kind() {}
+
+func (*Value_BoolValue) isValue_Kind() {}
+
+func (*Value_StructValue) isValue_Kind() {}
+
+func (*Value_ListValue) isValue_Kind() {}
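The `Value_*` wrapper structs above are the concrete carriers behind the `kind` oneof; exhaustive handling is typically written as a type switch over `GetKind()`. A sketch, assuming the `fmt` and `structpb` imports from the earlier examples:

```go
func describe(v *structpb.Value) string {
	switch k := v.GetKind().(type) {
	case *structpb.Value_NullValue:
		return "null"
	case *structpb.Value_NumberValue:
		return fmt.Sprintf("number %v", k.NumberValue)
	case *structpb.Value_StringValue:
		return fmt.Sprintf("string %q", k.StringValue)
	case *structpb.Value_BoolValue:
		return fmt.Sprintf("bool %v", k.BoolValue)
	case *structpb.Value_StructValue:
		return fmt.Sprintf("struct with %d fields", len(k.StructValue.GetFields()))
	case *structpb.Value_ListValue:
		return fmt.Sprintf("list of %d values", len(k.ListValue.GetValues()))
	default:
		return "unset"
	}
}
```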
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is a JSON array.
+type ListValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Repeated field of dynamically typed values.
+	Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+// NewList constructs a ListValue from a general-purpose Go slice.
+// The slice elements are converted using NewValue.
+func NewList(v []interface{}) (*ListValue, error) {
+	x := &ListValue{Values: make([]*Value, len(v))}
+	for i, v := range v {
+		var err error
+		x.Values[i], err = NewValue(v)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return x, nil
+}
+
+// AsSlice converts x to a general-purpose Go slice.
+// The slice elements are converted by calling Value.AsInterface.
+func (x *ListValue) AsSlice() []interface{} {
+	vals := x.GetValues()
+	vs := make([]interface{}, len(vals))
+	for i, v := range vals {
+		vs[i] = v.AsInterface()
+	}
+	return vs
+}
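`NewList` and `AsSlice` are the slice-shaped counterparts of `NewStruct`/`AsMap`; note that numbers come back as `float64` regardless of the Go type they went in as. A small sketch, reusing the `structpb` import from the earlier examples:

```go
lv, err := structpb.NewList([]interface{}{1, "a", true})
if err != nil {
	panic(err)
}
fmt.Printf("%#v\n", lv.AsSlice()) // []interface {}{1, "a", true}, with the 1 as a float64
```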
+
+func (x *ListValue) MarshalJSON() ([]byte, error) {
+	return protojson.Marshal(x)
+}
+
+func (x *ListValue) UnmarshalJSON(b []byte) error {
+	return protojson.Unmarshal(b, x)
+}
+
+func (x *ListValue) Reset() {
+	*x = ListValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_struct_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ListValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListValue) ProtoMessage() {}
+
+func (x *ListValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_struct_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListValue.ProtoReflect.Descriptor instead.
+func (*ListValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_struct_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ListValue) GetValues() []*Value {
+	if x != nil {
+		return x.Values
+	}
+	return nil
+}
+
+var File_google_protobuf_struct_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_struct_proto_rawDesc = []byte{
+	0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22,
+	0x98, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x66, 0x69,
+	0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72,
+	0x75, 0x63, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
+	0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64,
+	0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x56,
+	0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c,
+	0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56,
+	0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75,
+	0x65, 0x12, 0x23, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x62, 0x65,
+	0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67,
+	0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b,
+	0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62,
+	0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48,
+	0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x0c,
+	0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01,
+	0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73,
+	0x74, 0x72, 0x75, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6c, 0x69,
+	0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69,
+	0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22,
+	0x3b, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2e, 0x0a, 0x06,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56,
+	0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2a, 0x1b, 0x0a, 0x09,
+	0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x55, 0x4c,
+	0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x00, 0x42, 0x7f, 0x0a, 0x13, 0x63, 0x6f, 0x6d,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
+	0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f,
+	0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65,
+	0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62,
+	0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c,
+	0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x33,
+}
+
+var (
+	file_google_protobuf_struct_proto_rawDescOnce sync.Once
+	file_google_protobuf_struct_proto_rawDescData = file_google_protobuf_struct_proto_rawDesc
+)
+
+func file_google_protobuf_struct_proto_rawDescGZIP() []byte {
+	file_google_protobuf_struct_proto_rawDescOnce.Do(func() {
+		file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_struct_proto_rawDescData)
+	})
+	return file_google_protobuf_struct_proto_rawDescData
+}
+
+var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_google_protobuf_struct_proto_goTypes = []interface{}{
+	(NullValue)(0),    // 0: google.protobuf.NullValue
+	(*Struct)(nil),    // 1: google.protobuf.Struct
+	(*Value)(nil),     // 2: google.protobuf.Value
+	(*ListValue)(nil), // 3: google.protobuf.ListValue
+	nil,               // 4: google.protobuf.Struct.FieldsEntry
+}
+var file_google_protobuf_struct_proto_depIdxs = []int32{
+	4, // 0: google.protobuf.Struct.fields:type_name -> google.protobuf.Struct.FieldsEntry
+	0, // 1: google.protobuf.Value.null_value:type_name -> google.protobuf.NullValue
+	1, // 2: google.protobuf.Value.struct_value:type_name -> google.protobuf.Struct
+	3, // 3: google.protobuf.Value.list_value:type_name -> google.protobuf.ListValue
+	2, // 4: google.protobuf.ListValue.values:type_name -> google.protobuf.Value
+	2, // 5: google.protobuf.Struct.FieldsEntry.value:type_name -> google.protobuf.Value
+	6, // [6:6] is the sub-list for method output_type
+	6, // [6:6] is the sub-list for method input_type
+	6, // [6:6] is the sub-list for extension type_name
+	6, // [6:6] is the sub-list for extension extendee
+	0, // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_struct_proto_init() }
+func file_google_protobuf_struct_proto_init() {
+	if File_google_protobuf_struct_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Struct); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Value); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ListValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []interface{}{
+		(*Value_NullValue)(nil),
+		(*Value_NumberValue)(nil),
+		(*Value_StringValue)(nil),
+		(*Value_BoolValue)(nil),
+		(*Value_StructValue)(nil),
+		(*Value_ListValue)(nil),
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_protobuf_struct_proto_rawDesc,
+			NumEnums:      1,
+			NumMessages:   4,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_protobuf_struct_proto_goTypes,
+		DependencyIndexes: file_google_protobuf_struct_proto_depIdxs,
+		EnumInfos:         file_google_protobuf_struct_proto_enumTypes,
+		MessageInfos:      file_google_protobuf_struct_proto_msgTypes,
+	}.Build()
+	File_google_protobuf_struct_proto = out.File
+	file_google_protobuf_struct_proto_rawDesc = nil
+	file_google_protobuf_struct_proto_goTypes = nil
+	file_google_protobuf_struct_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..762a87130f83b0e9f52d8a048b0a615ed36c8ee3
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
@@ -0,0 +1,760 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Wrappers for primitive (non-message) types. These types are useful
+// for embedding primitives in the `google.protobuf.Any` type and for places
+// where we need to distinguish between the absence of a primitive
+// typed field and its default value.
+//
+// These wrappers have no meaningful use within repeated fields as they lack
+// the ability to detect presence on individual elements.
+// These wrappers have no meaningful use within a map or a oneof since
+// individual entries of a map or fields of a oneof can already detect presence.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/wrappers.proto
+
+package wrapperspb
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+// Wrapper message for `double`.
+//
+// The JSON representation for `DoubleValue` is a JSON number.
+type DoubleValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The double value.
+	Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Double stores v in a new DoubleValue and returns a pointer to it.
+func Double(v float64) *DoubleValue {
+	return &DoubleValue{Value: v}
+}
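Each wrapper type comes with a one-line constructor like `Double` above. Their point, per the file header, is field presence: in a generated message, a wrapper-typed field is a pointer, so `nil` ("never set") is distinguishable from an explicitly set default. A hedged sketch, where `SamplingConfig` stands in for a hypothetical generated message with a `google.protobuf.DoubleValue` field:

```go
// Assumes: wrapperspb "google.golang.org/protobuf/types/known/wrapperspb".
// SamplingConfig is a hypothetical stand-in for a generated message.
type SamplingConfig struct {
	SamplingRate *wrapperspb.DoubleValue
}

func rateOrDefault(c *SamplingConfig) float64 {
	if c.SamplingRate == nil {
		return 1.0 // field never set: apply a default
	}
	return c.SamplingRate.GetValue() // explicitly set, possibly to 0
}
```

With this, `rateOrDefault(&SamplingConfig{})` yields 1.0 while `rateOrDefault(&SamplingConfig{SamplingRate: wrapperspb.Double(0)})` yields 0 — a distinction a plain `float64` field cannot express.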
+
+func (x *DoubleValue) Reset() {
+	*x = DoubleValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *DoubleValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DoubleValue) ProtoMessage() {}
+
+func (x *DoubleValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use DoubleValue.ProtoReflect.Descriptor instead.
+func (*DoubleValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *DoubleValue) GetValue() float64 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// Wrapper message for `float`.
+//
+// The JSON representation for `FloatValue` is a JSON number.
+type FloatValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The float value.
+	Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Float stores v in a new FloatValue and returns a pointer to it.
+func Float(v float32) *FloatValue {
+	return &FloatValue{Value: v}
+}
+
+func (x *FloatValue) Reset() {
+	*x = FloatValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *FloatValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FloatValue) ProtoMessage() {}
+
+func (x *FloatValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FloatValue.ProtoReflect.Descriptor instead.
+func (*FloatValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *FloatValue) GetValue() float32 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// Wrapper message for `int64`.
+//
+// The JSON representation for `Int64Value` is a JSON string, since 64-bit
+// integers can exceed the exact range of an IEEE-754 double in JSON readers.
+type Int64Value struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The int64 value.
+	Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Int64 stores v in a new Int64Value and returns a pointer to it.
+func Int64(v int64) *Int64Value {
+	return &Int64Value{Value: v}
+}
+
+func (x *Int64Value) Reset() {
+	*x = Int64Value{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Int64Value) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int64Value) ProtoMessage() {}
+
+func (x *Int64Value) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int64Value.ProtoReflect.Descriptor instead.
+func (*Int64Value) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Int64Value) GetValue() int64 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// Wrapper message for `uint64`.
+//
+// The JSON representation for `UInt64Value` is a JSON string, for the same
+// precision reason as `Int64Value`.
+type UInt64Value struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The uint64 value.
+	Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// UInt64 stores v in a new UInt64Value and returns a pointer to it.
+func UInt64(v uint64) *UInt64Value {
+	return &UInt64Value{Value: v}
+}
+
+func (x *UInt64Value) Reset() {
+	*x = UInt64Value{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *UInt64Value) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UInt64Value) ProtoMessage() {}
+
+func (x *UInt64Value) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use UInt64Value.ProtoReflect.Descriptor instead.
+func (*UInt64Value) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *UInt64Value) GetValue() uint64 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// Wrapper message for `int32`.
+//
+// The JSON representation for `Int32Value` is a JSON number.
+type Int32Value struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The int32 value.
+	Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Int32 stores v in a new Int32Value and returns a pointer to it.
+func Int32(v int32) *Int32Value {
+	return &Int32Value{Value: v}
+}
+
+func (x *Int32Value) Reset() {
+	*x = Int32Value{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Int32Value) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int32Value) ProtoMessage() {}
+
+func (x *Int32Value) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int32Value.ProtoReflect.Descriptor instead.
+func (*Int32Value) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Int32Value) GetValue() int32 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is a JSON number.
+type UInt32Value struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The uint32 value.
+	Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// UInt32 stores v in a new UInt32Value and returns a pointer to it.
+func UInt32(v uint32) *UInt32Value {
+	return &UInt32Value{Value: v}
+}
+
+func (x *UInt32Value) Reset() {
+	*x = UInt32Value{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *UInt32Value) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UInt32Value) ProtoMessage() {}
+
+func (x *UInt32Value) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use UInt32Value.ProtoReflect.Descriptor instead.
+func (*UInt32Value) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *UInt32Value) GetValue() uint32 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is the JSON literal `true` or `false`.
+type BoolValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The bool value.
+	Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Bool stores v in a new BoolValue and returns a pointer to it.
+func Bool(v bool) *BoolValue {
+	return &BoolValue{Value: v}
+}
+
+func (x *BoolValue) Reset() {
+	*x = BoolValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BoolValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BoolValue) ProtoMessage() {}
+
+func (x *BoolValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BoolValue.ProtoReflect.Descriptor instead.
+func (*BoolValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *BoolValue) GetValue() bool {
+	if x != nil {
+		return x.Value
+	}
+	return false
+}
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is a JSON string.
+type StringValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The string value.
+	Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// String stores v in a new StringValue and returns a pointer to it.
+func String(v string) *StringValue {
+	return &StringValue{Value: v}
+}
+
+func (x *StringValue) Reset() {
+	*x = StringValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *StringValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StringValue) ProtoMessage() {}
+
+func (x *StringValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use StringValue.ProtoReflect.Descriptor instead.
+func (*StringValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *StringValue) GetValue() string {
+	if x != nil {
+		return x.Value
+	}
+	return ""
+}
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is a JSON string holding the
+// base64-encoded bytes.
+type BytesValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The bytes value.
+	Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Bytes stores v in a new BytesValue and returns a pointer to it.
+func Bytes(v []byte) *BytesValue {
+	return &BytesValue{Value: v}
+}
+
+func (x *BytesValue) Reset() {
+	*x = BytesValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BytesValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BytesValue) ProtoMessage() {}
+
+func (x *BytesValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BytesValue.ProtoReflect.Descriptor instead.
+func (*BytesValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *BytesValue) GetValue() []byte {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_wrappers_proto_rawDesc = []byte{
+	0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x22, 0x23, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65,
+	0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56,
+	0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e,
+	0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23,
+	0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61,
+	0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75,
+	0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
+	0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33,
+	0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x09,
+	0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+	0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22,
+	0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14,
+	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c,
+	0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x83, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x42, 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+	0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
+	0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79,
+	0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
+	0x72, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e,
+	0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+	0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_protobuf_wrappers_proto_rawDescOnce sync.Once
+	file_google_protobuf_wrappers_proto_rawDescData = file_google_protobuf_wrappers_proto_rawDesc
+)
+
+func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte {
+	file_google_protobuf_wrappers_proto_rawDescOnce.Do(func() {
+		file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_wrappers_proto_rawDescData)
+	})
+	return file_google_protobuf_wrappers_proto_rawDescData
+}
+
+var file_google_protobuf_wrappers_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
+var file_google_protobuf_wrappers_proto_goTypes = []interface{}{
+	(*DoubleValue)(nil), // 0: google.protobuf.DoubleValue
+	(*FloatValue)(nil),  // 1: google.protobuf.FloatValue
+	(*Int64Value)(nil),  // 2: google.protobuf.Int64Value
+	(*UInt64Value)(nil), // 3: google.protobuf.UInt64Value
+	(*Int32Value)(nil),  // 4: google.protobuf.Int32Value
+	(*UInt32Value)(nil), // 5: google.protobuf.UInt32Value
+	(*BoolValue)(nil),   // 6: google.protobuf.BoolValue
+	(*StringValue)(nil), // 7: google.protobuf.StringValue
+	(*BytesValue)(nil),  // 8: google.protobuf.BytesValue
+}
+var file_google_protobuf_wrappers_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_wrappers_proto_init() }
+func file_google_protobuf_wrappers_proto_init() {
+	if File_google_protobuf_wrappers_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*DoubleValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*FloatValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Int64Value); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*UInt64Value); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Int32Value); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*UInt32Value); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*BoolValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*StringValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*BytesValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_protobuf_wrappers_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   9,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_protobuf_wrappers_proto_goTypes,
+		DependencyIndexes: file_google_protobuf_wrappers_proto_depIdxs,
+		MessageInfos:      file_google_protobuf_wrappers_proto_msgTypes,
+	}.Build()
+	File_google_protobuf_wrappers_proto = out.File
+	file_google_protobuf_wrappers_proto_rawDesc = nil
+	file_google_protobuf_wrappers_proto_goTypes = nil
+	file_google_protobuf_wrappers_proto_depIdxs = nil
+}
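The `init` above registers the file with the global protobuf registry via `protoimpl.TypeBuilder`, which is what makes the wrapper types resolvable by full name at runtime. A sketch, assuming `protoregistry` from `google.golang.org/protobuf/reflect/protoregistry`:

```go
mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.BoolValue")
if err != nil {
	panic(err)
}
msg := mt.New().Interface() // a fresh *wrapperspb.BoolValue
fmt.Printf("%T\n", msg)     // *wrapperspb.BoolValue
```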
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 3fa5fa1bcf7863ac77b89abc6f993bc6f651452f..b6b40aa6f30ed8a28f1c4493dbd32acb7a2e6fd4 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,6 +1,22 @@
 # github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible
 ## explicit
 github.com/Knetic/govaluate
+# github.com/agoda-com/opentelemetry-logs-go v0.4.3
+## explicit; go 1.20
+github.com/agoda-com/opentelemetry-logs-go
+github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs
+github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal
+github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/envconfig
+github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/logstransform
+github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig
+github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry
+github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc
+github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp
+github.com/agoda-com/opentelemetry-logs-go/internal/global
+github.com/agoda-com/opentelemetry-logs-go/logs
+github.com/agoda-com/opentelemetry-logs-go/sdk/internal/env
+github.com/agoda-com/opentelemetry-logs-go/sdk/logs
+github.com/agoda-com/opentelemetry-logs-go/semconv
 # github.com/benbjohnson/clock v1.3.5
 ## explicit; go 1.15
 github.com/benbjohnson/clock
@@ -10,6 +26,9 @@ github.com/beorn7/perks/quantile
 # github.com/caarlos0/env/v6 v6.10.1
 ## explicit; go 1.17
 github.com/caarlos0/env/v6
+# github.com/cenkalti/backoff/v4 v4.2.1
+## explicit; go 1.18
+github.com/cenkalti/backoff/v4
 # github.com/cespare/xxhash/v2 v2.2.0
 ## explicit; go 1.11
 github.com/cespare/xxhash/v2
@@ -60,10 +79,14 @@ github.com/go-kit/log/level
 # github.com/go-logfmt/logfmt v0.5.1
 ## explicit; go 1.17
 github.com/go-logfmt/logfmt
-# github.com/go-logr/logr v1.3.0
+# github.com/go-logr/logr v1.4.1
 ## explicit; go 1.18
 github.com/go-logr/logr
+github.com/go-logr/logr/funcr
 github.com/go-logr/logr/slogr
+# github.com/go-logr/stdr v1.2.2
+## explicit; go 1.16
+github.com/go-logr/stdr
 # github.com/go-openapi/jsonpointer v0.19.6
 ## explicit; go 1.13
 github.com/go-openapi/jsonpointer
@@ -114,19 +137,24 @@ github.com/google/gofuzz/bytesource
 ## explicit; go 1.12
 github.com/google/gopacket
 github.com/google/gopacket/layers
-# github.com/google/uuid v1.4.0
+# github.com/google/uuid v1.5.0
 ## explicit
 github.com/google/uuid
 # github.com/gorilla/websocket v1.5.0
 ## explicit; go 1.12
 github.com/gorilla/websocket
+# github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0
+## explicit; go 1.19
+github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
+github.com/grpc-ecosystem/grpc-gateway/v2/runtime
+github.com/grpc-ecosystem/grpc-gateway/v2/utilities
 # github.com/heptiolabs/healthcheck v0.0.0-20211123025425-613501dd5deb
 ## explicit
 github.com/heptiolabs/healthcheck
 # github.com/imdario/mergo v0.3.15
 ## explicit; go 1.13
 github.com/imdario/mergo
-# github.com/ip2location/ip2location-go/v9 v9.6.0
+# github.com/ip2location/ip2location-go/v9 v9.7.0
 ## explicit; go 1.14
 github.com/ip2location/ip2location-go/v9
 # github.com/josharian/intern v1.0.0
@@ -138,8 +166,8 @@ github.com/jpillora/backoff
 # github.com/json-iterator/go v1.1.12
 ## explicit; go 1.12
 github.com/json-iterator/go
-# github.com/klauspost/compress v1.17.0
-## explicit; go 1.18
+# github.com/klauspost/compress v1.17.4
+## explicit; go 1.19
 github.com/klauspost/compress
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
@@ -151,7 +179,7 @@ github.com/klauspost/compress/s2
 github.com/klauspost/compress/snappy
 github.com/klauspost/compress/zstd
 github.com/klauspost/compress/zstd/internal/xxhash
-# github.com/klauspost/cpuid/v2 v2.2.5
+# github.com/klauspost/cpuid/v2 v2.2.6
 ## explicit; go 1.15
 github.com/klauspost/cpuid/v2
 # github.com/libp2p/go-reuseport v0.3.0
@@ -165,16 +193,13 @@ github.com/mailru/easyjson/jwriter
 # github.com/mariomac/guara v0.0.0-20220523124851-5fc279816f1f
 ## explicit; go 1.17
 github.com/mariomac/guara/pkg/test
-# github.com/matttproud/golang_protobuf_extensions v1.0.4
-## explicit; go 1.9
-github.com/matttproud/golang_protobuf_extensions/pbutil
 # github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118
 ## explicit; go 1.12
 github.com/mdlayher/ethernet
 # github.com/minio/md5-simd v1.1.2
 ## explicit; go 1.14
 github.com/minio/md5-simd
-# github.com/minio/minio-go/v7 v7.0.63
+# github.com/minio/minio-go/v7 v7.0.67
 ## explicit; go 1.17
 github.com/minio/minio-go/v7
 github.com/minio/minio-go/v7/pkg/credentials
@@ -212,14 +237,15 @@ github.com/mwitkow/go-conntrack
 # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f
 ## explicit
 github.com/mxk/go-flowrate/flowrate
-# github.com/netobserv/flowlogs-pipeline v0.1.11-0.20231123152750-f3b03fa192aa
-## explicit; go 1.19
+# github.com/netobserv/flowlogs-pipeline v0.1.11
+## explicit; go 1.20
 github.com/netobserv/flowlogs-pipeline/pkg/api
 github.com/netobserv/flowlogs-pipeline/pkg/config
 github.com/netobserv/flowlogs-pipeline/pkg/operational
 github.com/netobserv/flowlogs-pipeline/pkg/pipeline
 github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode
 github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode
+github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry
 github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract
 github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate
 github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack
@@ -228,11 +254,13 @@ github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest
 github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform
 github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes
 github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni
+github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers
 github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/location
 github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/netdb
 github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils
 github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write
 github.com/netobserv/flowlogs-pipeline/pkg/prometheus
+github.com/netobserv/flowlogs-pipeline/pkg/server
 github.com/netobserv/flowlogs-pipeline/pkg/utils
 # github.com/netobserv/gopipes v0.3.0
 ## explicit; go 1.18
@@ -247,7 +275,7 @@ github.com/netobserv/loki-client-go/pkg/labelutil
 github.com/netobserv/loki-client-go/pkg/logproto
 github.com/netobserv/loki-client-go/pkg/metric
 github.com/netobserv/loki-client-go/pkg/urlutil
-# github.com/netsampler/goflow2 v1.3.6
+# github.com/netsampler/goflow2 v1.3.7
 ## explicit; go 1.18
 github.com/netsampler/goflow2/decoders
 github.com/netsampler/goflow2/decoders/netflow
@@ -312,22 +340,22 @@ github.com/pkg/errors
 # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
 ## explicit
 github.com/pmezard/go-difflib/difflib
-# github.com/prometheus/client_golang v1.17.0
+# github.com/prometheus/client_golang v1.18.0
 ## explicit; go 1.19
 github.com/prometheus/client_golang/prometheus
 github.com/prometheus/client_golang/prometheus/internal
 github.com/prometheus/client_golang/prometheus/promhttp
-# github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16
-## explicit; go 1.18
+# github.com/prometheus/client_model v0.5.0
+## explicit; go 1.19
 github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.44.0
-## explicit; go 1.18
+# github.com/prometheus/common v0.46.0
+## explicit; go 1.20
 github.com/prometheus/common/config
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
 github.com/prometheus/common/model
 github.com/prometheus/common/version
-# github.com/prometheus/procfs v0.11.1
+# github.com/prometheus/procfs v0.12.0
 ## explicit; go 1.19
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
@@ -445,10 +473,93 @@ github.com/xdg-go/scram
 # github.com/xdg-go/stringprep v1.0.4
 ## explicit; go 1.11
 github.com/xdg-go/stringprep
+# go.opentelemetry.io/otel v1.23.1
+## explicit; go 1.20
+go.opentelemetry.io/otel
+go.opentelemetry.io/otel/attribute
+go.opentelemetry.io/otel/baggage
+go.opentelemetry.io/otel/codes
+go.opentelemetry.io/otel/internal
+go.opentelemetry.io/otel/internal/attribute
+go.opentelemetry.io/otel/internal/baggage
+go.opentelemetry.io/otel/internal/global
+go.opentelemetry.io/otel/propagation
+go.opentelemetry.io/otel/semconv/v1.21.0
+go.opentelemetry.io/otel/semconv/v1.24.0
+# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.45.0
+## explicit; go 1.20
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform
+# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0
+## explicit; go 1.20
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.23.1
+## explicit; go 1.20
+go.opentelemetry.io/otel/exporters/otlp/otlptrace
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.23.1
+## explicit; go 1.20
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.23.1
+## explicit; go 1.20
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry
+# go.opentelemetry.io/otel/metric v1.23.1
+## explicit; go 1.20
+go.opentelemetry.io/otel/metric
+go.opentelemetry.io/otel/metric/embedded
+go.opentelemetry.io/otel/metric/noop
+# go.opentelemetry.io/otel/sdk v1.23.1
+## explicit; go 1.20
+go.opentelemetry.io/otel/sdk
+go.opentelemetry.io/otel/sdk/instrumentation
+go.opentelemetry.io/otel/sdk/internal
+go.opentelemetry.io/otel/sdk/internal/env
+go.opentelemetry.io/otel/sdk/resource
+go.opentelemetry.io/otel/sdk/trace
+# go.opentelemetry.io/otel/sdk/metric v1.23.1
+## explicit; go 1.20
+go.opentelemetry.io/otel/sdk/metric
+go.opentelemetry.io/otel/sdk/metric/internal
+go.opentelemetry.io/otel/sdk/metric/internal/aggregate
+go.opentelemetry.io/otel/sdk/metric/internal/exemplar
+go.opentelemetry.io/otel/sdk/metric/internal/x
+go.opentelemetry.io/otel/sdk/metric/metricdata
+# go.opentelemetry.io/otel/trace v1.23.1
+## explicit; go 1.20
+go.opentelemetry.io/otel/trace
+go.opentelemetry.io/otel/trace/embedded
+go.opentelemetry.io/otel/trace/noop
+# go.opentelemetry.io/proto/otlp v1.1.0
+## explicit; go 1.17
+go.opentelemetry.io/proto/otlp/collector/logs/v1
+go.opentelemetry.io/proto/otlp/collector/metrics/v1
+go.opentelemetry.io/proto/otlp/collector/trace/v1
+go.opentelemetry.io/proto/otlp/common/v1
+go.opentelemetry.io/proto/otlp/logs/v1
+go.opentelemetry.io/proto/otlp/metrics/v1
+go.opentelemetry.io/proto/otlp/resource/v1
+go.opentelemetry.io/proto/otlp/trace/v1
 # go.uber.org/atomic v1.9.0
 ## explicit; go 1.13
 go.uber.org/atomic
-# golang.org/x/crypto v0.17.0
+# golang.org/x/crypto v0.19.0
 ## explicit; go 1.18
 golang.org/x/crypto/argon2
 golang.org/x/crypto/blake2b
@@ -461,7 +572,7 @@ golang.org/x/crypto/curve25519/internal/field
 golang.org/x/exp/constraints
 golang.org/x/exp/maps
 golang.org/x/exp/slices
-# golang.org/x/net v0.19.0
+# golang.org/x/net v0.21.0
 ## explicit; go 1.18
 golang.org/x/net/context
 golang.org/x/net/html
@@ -476,7 +587,7 @@ golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
 golang.org/x/net/publicsuffix
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.14.0
+# golang.org/x/oauth2 v0.16.0
 ## explicit; go 1.18
 golang.org/x/oauth2
 golang.org/x/oauth2/clientcredentials
@@ -487,7 +598,8 @@ golang.org/x/sys/cpu
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
-# golang.org/x/term v0.15.0
+golang.org/x/sys/windows/registry
+# golang.org/x/term v0.17.0
 ## explicit; go 1.18
 golang.org/x/term
 # golang.org/x/text v0.14.0
@@ -496,8 +608,8 @@ golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
-# golang.org/x/time v0.3.0
-## explicit
+# golang.org/x/time v0.5.0
+## explicit; go 1.18
 golang.org/x/time/rate
 # google.golang.org/appengine v1.6.8
 ## explicit; go 1.11
@@ -508,8 +620,12 @@ google.golang.org/appengine/internal/log
 google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17
+# google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917
+## explicit; go 1.19
+google.golang.org/genproto/googleapis/api/httpbody
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917
 ## explicit; go 1.19
+google.golang.org/genproto/googleapis/rpc/errdetails
 google.golang.org/genproto/googleapis/rpc/status
 # google.golang.org/grpc v1.61.0
 ## explicit; go 1.19
@@ -527,8 +643,10 @@ google.golang.org/grpc/connectivity
 google.golang.org/grpc/credentials
 google.golang.org/grpc/credentials/insecure
 google.golang.org/grpc/encoding
+google.golang.org/grpc/encoding/gzip
 google.golang.org/grpc/encoding/proto
 google.golang.org/grpc/grpclog
+google.golang.org/grpc/health/grpc_health_v1
 google.golang.org/grpc/internal
 google.golang.org/grpc/internal/backoff
 google.golang.org/grpc/internal/balancer/gracefulswitch
@@ -569,6 +687,7 @@ google.golang.org/grpc/status
 google.golang.org/grpc/tap
 # google.golang.org/protobuf v1.32.0
 ## explicit; go 1.17
+google.golang.org/protobuf/encoding/protodelim
 google.golang.org/protobuf/encoding/protojson
 google.golang.org/protobuf/encoding/prototext
 google.golang.org/protobuf/encoding/protowire
@@ -600,7 +719,10 @@ google.golang.org/protobuf/runtime/protoimpl
 google.golang.org/protobuf/types/descriptorpb
 google.golang.org/protobuf/types/known/anypb
 google.golang.org/protobuf/types/known/durationpb
+google.golang.org/protobuf/types/known/fieldmaskpb
+google.golang.org/protobuf/types/known/structpb
 google.golang.org/protobuf/types/known/timestamppb
+google.golang.org/protobuf/types/known/wrapperspb
 # gopkg.in/inf.v0 v0.9.1
 ## explicit
 gopkg.in/inf.v0