diff --git a/go.mod b/go.mod
index 4654ea155ecffd22a1187f86c78cdfba76eb0353..eb66421630c8669cf1471fc1ad6d899bdf871bfc 100644
--- a/go.mod
+++ b/go.mod
@@ -12,7 +12,7 @@ require (
 	github.com/google/gopacket v1.1.19
 	github.com/mariomac/guara v0.0.0-20220523124851-5fc279816f1f
 	github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118
-	github.com/netobserv/flowlogs-pipeline v0.1.12-0.20240312115357-ddc1f67022a5
+	github.com/netobserv/flowlogs-pipeline v0.1.12-0.20240325100124-fd783b283c7c
 	github.com/netobserv/gopipes v0.3.0
 	github.com/paulbellamy/ratecounter v0.2.0
 	github.com/prometheus/client_golang v1.19.0
diff --git a/go.sum b/go.sum
index 8ab22b59ea5c070e24e8ff309180b22f677c7f5d..f6413fe95cf367f97a4e0be02d5d0fe7baf2d1cd 100644
--- a/go.sum
+++ b/go.sum
@@ -637,8 +637,8 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE
 github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
 github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
 github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
-github.com/netobserv/flowlogs-pipeline v0.1.12-0.20240312115357-ddc1f67022a5 h1:eKS116Eks3NT6k3fGTBAxXY1aQmAVOfh/cuT1Qj1KiI=
-github.com/netobserv/flowlogs-pipeline v0.1.12-0.20240312115357-ddc1f67022a5/go.mod h1:OyKXDufQOQjfEpw5StxNfGCNJ2JIUvb8DO3x9jkAfpg=
+github.com/netobserv/flowlogs-pipeline v0.1.12-0.20240325100124-fd783b283c7c h1:QXUnBe5PbAp6crgDHx1HdkwWeEkszHF43aE9pfdgbck=
+github.com/netobserv/flowlogs-pipeline v0.1.12-0.20240325100124-fd783b283c7c/go.mod h1:aiCIZopeZfHuI1/jt/Gg2Cns2y4DOanIVJrOFRergYU=
 github.com/netobserv/gopipes v0.3.0 h1:IYmPnnAVCdSK7VmHmpFhrVBOEm45qpgbZmJz1sSW+60=
 github.com/netobserv/gopipes v0.3.0/go.mod h1:N7/Gz05EOF0CQQSKWsv3eof22Cj2PB08Pbttw98YFYU=
 github.com/netobserv/loki-client-go v0.0.0-20220927092034-f37122a54500 h1:RmnoJe/ci5q+QdM7upFdxiU+D8F3L3qTd5wXCwwHefw=
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/api.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/api.go
index 83f921e86740b8ec8416ff36d1c261f3b4f57c93..711474bbee7e3cdb259ab39f2befc8141b0802b2 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/api.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/api.go
@@ -18,48 +18,33 @@
 package api
 
 const (
-	FileType                     = "file"
-	FileLoopType                 = "file_loop"
-	FileChunksType               = "file_chunks"
-	SyntheticType                = "synthetic"
-	CollectorType                = "collector"
-	StdinType                    = "stdin"
-	GRPCType                     = "grpc"
-	FakeType                     = "fake"
-	KafkaType                    = "kafka"
-	S3Type                       = "s3"
-	OtlpLogsType                 = "otlplogs"
-	OtlpMetricsType              = "otlpmetrics"
-	OtlpTracesType               = "otlptraces"
-	StdoutType                   = "stdout"
-	LokiType                     = "loki"
-	IpfixType                    = "ipfix"
-	AggregateType                = "aggregates"
-	TimebasedType                = "timebased"
-	PromType                     = "prom"
-	GenericType                  = "generic"
-	NetworkType                  = "network"
-	FilterType                   = "filter"
-	ConnTrackType                = "conntrack"
-	NoneType                     = "none"
-	AddRegExIfRuleType           = "add_regex_if"
-	AddIfRuleType                = "add_if"
-	AddSubnetRuleType            = "add_subnet"
-	AddLocationRuleType          = "add_location"
-	AddServiceRuleType           = "add_service"
-	AddKubernetesRuleType        = "add_kubernetes"
-	AddKubernetesInfraRuleType   = "add_kubernetes_infra"
-	ReinterpretDirectionRuleType = "reinterpret_direction"
-	PromFilterEqual              = "equal"
-	PromFilterNotEqual           = "not_equal"
-	PromFilterPresence           = "presence"
-	PromFilterAbsence            = "absence"
-	PromFilterRegex              = "match_regex"
-	PromFilterNotRegex           = "not_match_regex"
+	FileType        = "file"
+	FileLoopType    = "file_loop"
+	FileChunksType  = "file_chunks"
+	SyntheticType   = "synthetic"
+	CollectorType   = "collector"
+	StdinType       = "stdin"
+	GRPCType        = "grpc"
+	FakeType        = "fake"
+	KafkaType       = "kafka"
+	S3Type          = "s3"
+	OtlpLogsType    = "otlplogs"
+	OtlpMetricsType = "otlpmetrics"
+	OtlpTracesType  = "otlptraces"
+	StdoutType      = "stdout"
+	LokiType        = "loki"
+	IpfixType       = "ipfix"
+	AggregateType   = "aggregates"
+	TimebasedType   = "timebased"
+	PromType        = "prom"
+	GenericType     = "generic"
+	NetworkType     = "network"
+	FilterType      = "filter"
+	ConnTrackType   = "conntrack"
+	NoneType        = "none"
 
 	TagYaml = "yaml"
 	TagDoc  = "doc"
-	TagEnum = "enum"
 )
 
 // Note: items beginning with doc: "## title" are top level items that get divided into sections inside api.md.
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/conntrack.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/conntrack.go
index ff18998b77a919b419e6b4ca9b1e22eade65d390..cd70895808215d63ee47b18fdbbf5f2c2387eb01 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/conntrack.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/conntrack.go
@@ -22,30 +22,29 @@ import (
 )
 
 const (
-	HashIdFieldName     = "_HashId"
+	HashIDFieldName     = "_HashId"
 	RecordTypeFieldName = "_RecordType"
 	IsFirstFieldName    = "_IsFirst"
 )
 
 type ConnTrack struct {
-	KeyDefinition         KeyDefinition              `yaml:"keyDefinition,omitempty" json:"keyDefinition,omitempty" doc:"fields that are used to identify the connection"`
-	OutputRecordTypes     []string                   `yaml:"outputRecordTypes,omitempty" json:"outputRecordTypes,omitempty" enum:"ConnTrackOutputRecordTypeEnum" doc:"output record types to emit"`
-	OutputFields          []OutputField              `yaml:"outputFields,omitempty" json:"outputFields,omitempty" doc:"list of output fields"`
-	Scheduling            []ConnTrackSchedulingGroup `yaml:"scheduling,omitempty" json:"scheduling,omitempty" doc:"list of timeouts and intervals to apply per selector"`
-	MaxConnectionsTracked int                        `yaml:"maxConnectionsTracked,omitempty" json:"maxConnectionsTracked,omitempty" doc:"maximum number of connections we keep in our cache (0 means no limit)"`
-	TCPFlags              ConnTrackTCPFlags          `yaml:"tcpFlags,omitempty" json:"tcpFlags,omitempty" doc:"settings for handling TCP flags"`
+	KeyDefinition         KeyDefinition                   `yaml:"keyDefinition,omitempty" json:"keyDefinition,omitempty" doc:"fields that are used to identify the connection"`
+	OutputRecordTypes     []ConnTrackOutputRecordTypeEnum `yaml:"outputRecordTypes,omitempty" json:"outputRecordTypes,omitempty" doc:"(enum) output record types to emit"`
+	OutputFields          []OutputField                   `yaml:"outputFields,omitempty" json:"outputFields,omitempty" doc:"list of output fields"`
+	Scheduling            []ConnTrackSchedulingGroup      `yaml:"scheduling,omitempty" json:"scheduling,omitempty" doc:"list of timeouts and intervals to apply per selector"`
+	MaxConnectionsTracked int                             `yaml:"maxConnectionsTracked,omitempty" json:"maxConnectionsTracked,omitempty" doc:"maximum number of connections we keep in our cache (0 means no limit)"`
+	TCPFlags              ConnTrackTCPFlags               `yaml:"tcpFlags,omitempty" json:"tcpFlags,omitempty" doc:"settings for handling TCP flags"`
 }
 
-type ConnTrackOutputRecordTypeEnum struct {
-	NewConnection string `yaml:"newConnection" json:"newConnection" doc:"New connection"`
-	EndConnection string `yaml:"endConnection" json:"endConnection" doc:"End connection"`
-	Heartbeat     string `yaml:"heartbeat" json:"heartbeat" doc:"Heartbeat"`
-	FlowLog       string `yaml:"flowLog" json:"flowLog" doc:"Flow log"`
-}
+type ConnTrackOutputRecordTypeEnum string
 
-func ConnTrackOutputRecordTypeName(operation string) string {
-	return GetEnumName(ConnTrackOutputRecordTypeEnum{}, operation)
-}
+const (
+	// For doc generation, enum definitions must match format `Constant Type = "value" // doc`
+	ConnTrackNewConnection ConnTrackOutputRecordTypeEnum = "newConnection" // New connection
+	ConnTrackEndConnection ConnTrackOutputRecordTypeEnum = "endConnection" // End connection
+	ConnTrackHeartbeat     ConnTrackOutputRecordTypeEnum = "heartbeat"     // Heartbeat
+	ConnTrackFlowLog       ConnTrackOutputRecordTypeEnum = "flowLog"       // Flow log
+)
 
 type KeyDefinition struct {
 	FieldGroups []FieldGroup  `yaml:"fieldGroups,omitempty" json:"fieldGroups,omitempty" doc:"list of field group definitions"`
@@ -70,21 +69,24 @@ type ConnTrackHash struct {
 }
 
 type OutputField struct {
-	Name          string `yaml:"name,omitempty" json:"name,omitempty" doc:"output field name"`
-	Operation     string `yaml:"operation,omitempty" json:"operation,omitempty" enum:"ConnTrackOperationEnum" doc:"aggregate operation on the field value"`
-	SplitAB       bool   `yaml:"splitAB,omitempty" json:"splitAB,omitempty" doc:"When true, 2 output fields will be created. One for A->B and one for B->A flows."`
-	Input         string `yaml:"input,omitempty" json:"input,omitempty" doc:"The input field to base the operation on. When omitted, 'name' is used"`
-	ReportMissing bool   `yaml:"reportMissing,omitempty" json:"reportMissing,omitempty" doc:"When true, missing input will produce MissingFieldError metric and error logs"`
+	Name          string                 `yaml:"name,omitempty" json:"name,omitempty" doc:"output field name"`
+	Operation     ConnTrackOperationEnum `yaml:"operation,omitempty" json:"operation,omitempty" doc:"(enum) aggregate operation on the field value"`
+	SplitAB       bool                   `yaml:"splitAB,omitempty" json:"splitAB,omitempty" doc:"When true, 2 output fields will be created. One for A->B and one for B->A flows."`
+	Input         string                 `yaml:"input,omitempty" json:"input,omitempty" doc:"The input field to base the operation on. When omitted, 'name' is used"`
+	ReportMissing bool                   `yaml:"reportMissing,omitempty" json:"reportMissing,omitempty" doc:"When true, missing input will produce MissingFieldError metric and error logs"`
 }
 
-type ConnTrackOperationEnum struct {
-	Sum   string `yaml:"sum" json:"sum" doc:"sum"`
-	Count string `yaml:"count" json:"count" doc:"count"`
-	Min   string `yaml:"min" json:"min" doc:"min"`
-	Max   string `yaml:"max" json:"max" doc:"max"`
-	First string `yaml:"first" json:"first" doc:"first"`
-	Last  string `yaml:"last" json:"last" doc:"last"`
-}
+type ConnTrackOperationEnum string
+
+const (
+	// For doc generation, enum definitions must match format `Constant Type = "value" // doc`
+	ConnTrackSum   ConnTrackOperationEnum = "sum"   // sum
+	ConnTrackCount ConnTrackOperationEnum = "count" // count
+	ConnTrackMin   ConnTrackOperationEnum = "min"   // min
+	ConnTrackMax   ConnTrackOperationEnum = "max"   // max
+	ConnTrackFirst ConnTrackOperationEnum = "first" // first
+	ConnTrackLast  ConnTrackOperationEnum = "last"  // last
+)
 
 type ConnTrackSchedulingGroup struct {
 	Selector             map[string]interface{} `yaml:"selector,omitempty" json:"selector,omitempty" doc:"key-value map to match against connection fields to apply this scheduling"`
@@ -93,16 +95,13 @@ type ConnTrackSchedulingGroup struct {
 	HeartbeatInterval    Duration               `yaml:"heartbeatInterval,omitempty" json:"heartbeatInterval,omitempty" doc:"duration of time to wait between heartbeat reports of a connection"`
 }
 
-func ConnTrackOperationName(operation string) string {
-	return GetEnumName(ConnTrackOperationEnum{}, operation)
-}
-
 type ConnTrackTCPFlags struct {
 	FieldName           string `yaml:"fieldName,omitempty" json:"fieldName,omitempty" doc:"name of the field containing TCP flags"`
 	DetectEndConnection bool   `yaml:"detectEndConnection,omitempty" json:"detectEndConnection,omitempty" doc:"detect end connections by FIN flag"`
 	SwapAB              bool   `yaml:"swapAB,omitempty" json:"swapAB,omitempty" doc:"swap source and destination when the first flowlog contains the SYN_ACK flag"`
 }
 
+//nolint:cyclop
 func (ct *ConnTrack) Validate() error {
 	isGroupAEmpty := ct.KeyDefinition.Hash.FieldGroupARef == ""
 	isGroupBEmpty := ct.KeyDefinition.Hash.FieldGroupBRef == ""
@@ -254,14 +253,14 @@ func addToSet(set map[string]struct{}, item string) bool {
 	return true
 }
 
-func isOperationValid(value string, splitAB bool) bool {
+func isOperationValid(value ConnTrackOperationEnum, splitAB bool) bool {
 	valid := true
 	switch value {
-	case ConnTrackOperationName("Sum"):
-	case ConnTrackOperationName("Count"):
-	case ConnTrackOperationName("Min"):
-	case ConnTrackOperationName("Max"):
-	case ConnTrackOperationName("First"), ConnTrackOperationName("Last"):
+	case ConnTrackSum:
+	case ConnTrackCount:
+	case ConnTrackMin:
+	case ConnTrackMax:
+	case ConnTrackFirst, ConnTrackLast:
 		valid = !splitAB
 	default:
 		valid = false
@@ -269,13 +268,13 @@ func isOperationValid(value string, splitAB bool) bool {
 	return valid
 }
 
-func isOutputRecordTypeValid(value string) bool {
+func isOutputRecordTypeValid(value ConnTrackOutputRecordTypeEnum) bool {
 	valid := true
 	switch value {
-	case ConnTrackOutputRecordTypeName("NewConnection"):
-	case ConnTrackOutputRecordTypeName("EndConnection"):
-	case ConnTrackOutputRecordTypeName("Heartbeat"):
-	case ConnTrackOutputRecordTypeName("FlowLog"):
+	case ConnTrackNewConnection:
+	case ConnTrackEndConnection:
+	case ConnTrackHeartbeat:
+	case ConnTrackFlowLog:
 	default:
 		valid = false
 	}
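
Note (not part of the patch): the hunk above replaces the reflection-based enum structs and `ConnTrackOperationName`/`ConnTrackOutputRecordTypeName` lookups with typed string constants. A minimal, hypothetical usage sketch built only from the types and constants shown above; field names and values are placeholders, not taken from this patch:

```go
package main

import "github.com/netobserv/flowlogs-pipeline/pkg/api"

// Hypothetical configuration sketch: the typed constants are referenced
// directly instead of being resolved at runtime via GetEnumName lookups.
var ct = api.ConnTrack{
	OutputRecordTypes: []api.ConnTrackOutputRecordTypeEnum{
		api.ConnTrackNewConnection,
		api.ConnTrackEndConnection,
	},
	OutputFields: []api.OutputField{
		{Name: "Bytes", Operation: api.ConnTrackSum, SplitAB: true}, // placeholder field name
		{Name: "numFlowLogs", Operation: api.ConnTrackCount},        // placeholder field name
	},
}
```
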
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/decoder.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/decoder.go
index cf6a2f83598821f9ce5e0b9a0dce0f8ee5aa884f..8772c06e6b8d01cc2d0b6d425c805ca7e87f577a 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/decoder.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/decoder.go
@@ -1,14 +1,13 @@
 package api
 
 type Decoder struct {
-	Type string `yaml:"type" json:"type" enum:"DecoderEnum" doc:"one of the following:"`
+	Type DecoderEnum `yaml:"type" json:"type" doc:"(enum) one of the following:"`
 }
 
-type DecoderEnum struct {
-	JSON     string `yaml:"json" json:"json" doc:"JSON decoder"`
-	Protobuf string `yaml:"protobuf" json:"protobuf" doc:"Protobuf decoder"`
-}
+type DecoderEnum string
 
-func DecoderName(decoder string) string {
-	return GetEnumName(DecoderEnum{}, decoder)
-}
+const (
+	// For doc generation, enum definitions must match format `Constant Type = "value" // doc`
+	DecoderJSON     DecoderEnum = "json"     // JSON decoder
+	DecoderProtobuf DecoderEnum = "protobuf" // Protobuf decoder
+)
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/encode_kafka.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/encode_kafka.go
index f03f9faec7b6f653d20d1f35490abb5bc0a2c495..d136bcbed30fdf2f42a39489aac6775709ede623 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/encode_kafka.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/encode_kafka.go
@@ -18,25 +18,24 @@
 package api
 
 type EncodeKafka struct {
-	Address      string      `yaml:"address" json:"address" doc:"address of kafka server"`
-	Topic        string      `yaml:"topic" json:"topic" doc:"kafka topic to write to"`
-	Balancer     string      `yaml:"balancer,omitempty" json:"balancer,omitempty" enum:"KafkaEncodeBalancerEnum" doc:"one of the following:"`
-	WriteTimeout int64       `yaml:"writeTimeout,omitempty" json:"writeTimeout,omitempty" doc:"timeout (in seconds) for write operation performed by the Writer"`
-	ReadTimeout  int64       `yaml:"readTimeout,omitempty" json:"readTimeout,omitempty" doc:"timeout (in seconds) for read operation performed by the Writer"`
-	BatchBytes   int64       `yaml:"batchBytes,omitempty" json:"batchBytes,omitempty" doc:"limit the maximum size of a request in bytes before being sent to a partition"`
-	BatchSize    int         `yaml:"batchSize,omitempty" json:"batchSize,omitempty" doc:"limit on how many messages will be buffered before being sent to a partition"`
-	TLS          *ClientTLS  `yaml:"tls" json:"tls" doc:"TLS client configuration (optional)"`
-	SASL         *SASLConfig `yaml:"sasl" json:"sasl" doc:"SASL configuration (optional)"`
+	Address      string                  `yaml:"address" json:"address" doc:"address of kafka server"`
+	Topic        string                  `yaml:"topic" json:"topic" doc:"kafka topic to write to"`
+	Balancer     KafkaEncodeBalancerEnum `yaml:"balancer,omitempty" json:"balancer,omitempty" doc:"(enum) one of the following:"`
+	WriteTimeout int64                   `yaml:"writeTimeout,omitempty" json:"writeTimeout,omitempty" doc:"timeout (in seconds) for write operation performed by the Writer"`
+	ReadTimeout  int64                   `yaml:"readTimeout,omitempty" json:"readTimeout,omitempty" doc:"timeout (in seconds) for read operation performed by the Writer"`
+	BatchBytes   int64                   `yaml:"batchBytes,omitempty" json:"batchBytes,omitempty" doc:"limit the maximum size of a request in bytes before being sent to a partition"`
+	BatchSize    int                     `yaml:"batchSize,omitempty" json:"batchSize,omitempty" doc:"limit on how many messages will be buffered before being sent to a partition"`
+	TLS          *ClientTLS              `yaml:"tls" json:"tls" doc:"TLS client configuration (optional)"`
+	SASL         *SASLConfig             `yaml:"sasl" json:"sasl" doc:"SASL configuration (optional)"`
 }
 
-type KafkaEncodeBalancerEnum struct {
-	RoundRobin string `yaml:"roundRobin" json:"roundRobin" doc:"RoundRobin balancer"`
-	LeastBytes string `yaml:"leastBytes" json:"leastBytes" doc:"LeastBytes balancer"`
-	Hash       string `yaml:"hash" json:"hash" doc:"Hash balancer"`
-	Crc32      string `yaml:"crc32" json:"crc32" doc:"Crc32 balancer"`
-	Murmur2    string `yaml:"murmur2" json:"murmur2" doc:"Murmur2 balancer"`
-}
+type KafkaEncodeBalancerEnum string
 
-func KafkaEncodeBalancerName(operation string) string {
-	return GetEnumName(KafkaEncodeBalancerEnum{}, operation)
-}
+const (
+	// For doc generation, enum definitions must match format `Constant Type = "value" // doc`
+	KafkaRoundRobin KafkaEncodeBalancerEnum = "roundRobin" // RoundRobin balancer
+	KafkaLeastBytes KafkaEncodeBalancerEnum = "leastBytes" // LeastBytes balancer
+	KafkaHash       KafkaEncodeBalancerEnum = "hash"       // Hash balancer
+	KafkaCrc32      KafkaEncodeBalancerEnum = "crc32"      // Crc32 balancer
+	KafkaMurmur2    KafkaEncodeBalancerEnum = "murmur2"    // Murmur2 balancer
+)
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/encode_prom.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/encode_prom.go
index 001dab3f7e7d091ac5bc276ed4f4f8efbd6cc5c6..b17628cc7761f30bb16b528aba92ce2110e9d4f4 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/encode_prom.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/encode_prom.go
@@ -30,16 +30,15 @@ type PromEncode struct {
 	MaxMetrics          int          `yaml:"maxMetrics,omitempty" json:"maxMetrics,omitempty" doc:"maximum number of metrics to report (default: unlimited)"`
 }
 
-type MetricEncodeOperationEnum struct {
-	Gauge        string `yaml:"gauge" json:"gauge" doc:"single numerical value that can arbitrarily go up and down"`
-	Counter      string `yaml:"counter" json:"counter" doc:"monotonically increasing counter whose value can only increase"`
-	Histogram    string `yaml:"histogram" json:"histogram" doc:"counts samples in configurable buckets"`
-	AggHistogram string `yaml:"agg_histogram" json:"agg_histogram" doc:"counts samples in configurable buckets, pre-aggregated via an Aggregate stage"`
-}
+type MetricEncodeOperationEnum string
 
-func MetricEncodeOperationName(operation string) string {
-	return GetEnumName(MetricEncodeOperationEnum{}, operation)
-}
+const (
+	// For doc generation, enum definitions must match format `Constant Type = "value" // doc`
+	MetricGauge        MetricEncodeOperationEnum = "gauge"         // single numerical value that can arbitrarily go up and down
+	MetricCounter      MetricEncodeOperationEnum = "counter"       // monotonically increasing counter whose value can only increase
+	MetricHistogram    MetricEncodeOperationEnum = "histogram"     // counts samples in configurable buckets
+	MetricAggHistogram MetricEncodeOperationEnum = "agg_histogram" // counts samples in configurable buckets, pre-aggregated via an Aggregate stage
+)
 
 type PromConnectionInfo struct {
 	Address string       `yaml:"address,omitempty" json:"address,omitempty" doc:"endpoint address to expose"`
@@ -48,32 +47,30 @@ type PromConnectionInfo struct {
 }
 
 type MetricsItem struct {
-	Name       string          `yaml:"name" json:"name" doc:"the metric name"`
-	Type       string          `yaml:"type" json:"type" enum:"MetricEncodeOperationEnum" doc:"one of the following:"`
-	Filters    []MetricsFilter `yaml:"filters" json:"filters" doc:"a list of criteria to filter entries by"`
-	ValueKey   string          `yaml:"valueKey" json:"valueKey" doc:"entry key from which to resolve metric value"`
-	Labels     []string        `yaml:"labels" json:"labels" doc:"labels to be associated with the metric"`
-	Buckets    []float64       `yaml:"buckets" json:"buckets" doc:"histogram buckets"`
-	ValueScale float64         `yaml:"valueScale" json:"valueScale" doc:"scale factor of the value (MetricVal := FlowVal / Scale)"`
+	Name       string                    `yaml:"name" json:"name" doc:"the metric name"`
+	Type       MetricEncodeOperationEnum `yaml:"type" json:"type" doc:"(enum) one of the following:"`
+	Filters    []MetricsFilter           `yaml:"filters" json:"filters" doc:"a list of criteria to filter entries by"`
+	ValueKey   string                    `yaml:"valueKey" json:"valueKey" doc:"entry key from which to resolve metric value"`
+	Labels     []string                  `yaml:"labels" json:"labels" doc:"labels to be associated with the metric"`
+	Buckets    []float64                 `yaml:"buckets" json:"buckets" doc:"histogram buckets"`
+	ValueScale float64                   `yaml:"valueScale,omitempty" json:"valueScale,omitempty" doc:"scale factor of the value (MetricVal := FlowVal / Scale)"`
 }
 
 type MetricsItems []MetricsItem
+type MetricFilterEnum string
 
-type MetricsFilter struct {
-	Key   string `yaml:"key" json:"key" doc:"the key to match and filter by"`
-	Value string `yaml:"value" json:"value" doc:"the value to match and filter by"`
-	Type  string `yaml:"type" json:"type" enum:"MetricEncodeFilterTypeEnum" doc:"the type of filter match: equal (default), not_equal, presence, absence, match_regex or not_match_regex"`
-}
+const (
+	// For doc generation, enum definitions must match format `Constant Type = "value" // doc`
+	MetricFilterEqual    MetricFilterEnum = "equal"           // match exactly the provided filter value
+	MetricFilterNotEqual MetricFilterEnum = "not_equal"       // the value must be different from the provided filter
+	MetricFilterPresence MetricFilterEnum = "presence"        // filter key must be present (filter value is ignored)
+	MetricFilterAbsence  MetricFilterEnum = "absence"         // filter key must be absent (filter value is ignored)
+	MetricFilterRegex    MetricFilterEnum = "match_regex"     // match filter value as a regular expression
+	MetricFilterNotRegex MetricFilterEnum = "not_match_regex" // the filter value must not match the provided regular expression
+)
 
-type MetricEncodeFilterTypeEnum struct {
-	Equal         string `yaml:"equal" json:"equal" doc:"match exactly the provided filter value"`
-	NotEqual      string `yaml:"not_equal" json:"not_equal" doc:"the value must be different from the provided filter"`
-	Presence      string `yaml:"presence" json:"presence" doc:"filter key must be present (filter value is ignored)"`
-	Absence       string `yaml:"absence" json:"absence" doc:"filter key must be absent (filter value is ignored)"`
-	MatchRegex    string `yaml:"match_regex" json:"match_regex" doc:"match filter value as a regular expression"`
-	NotMatchRegex string `yaml:"not_match_regex" json:"not_match_regex" doc:"the filter value must not match the provided regular expression"`
-}
-
-func MetricEncodeFilterTypeName(t string) string {
-	return GetEnumName(MetricEncodeFilterTypeEnum{}, t)
+type MetricsFilter struct {
+	Key   string           `yaml:"key" json:"key" doc:"the key to match and filter by"`
+	Value string           `yaml:"value" json:"value" doc:"the value to match and filter by"`
+	Type  MetricFilterEnum `yaml:"type,omitempty" json:"type,omitempty" doc:"the type of filter match (enum)"`
 }
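
Note (not part of the patch): the Prometheus encoder follows the same pattern, with `MetricsItem.Type` and `MetricsFilter.Type` now typed enums. A hypothetical sketch using the constants defined above; metric, key, and label names are placeholders:

```go
package main

import "github.com/netobserv/flowlogs-pipeline/pkg/api"

// Hypothetical metric definition sketch; names and keys are placeholders.
var metric = api.MetricsItem{
	Name:     "example_flows_total",
	Type:     api.MetricCounter,
	ValueKey: "Bytes",
	Labels:   []string{"namespace"},
	Filters: []api.MetricsFilter{
		{Key: "Duplicate", Value: "true", Type: api.MetricFilterNotEqual},
	},
}
```
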
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/encode_s3.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/encode_s3.go
index e3687f7e7e104417a329243e6b93cdebf5b1d606..346bbb1a632f5b87fcabb63d1326fdeb611a5f5a 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/encode_s3.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/encode_s3.go
@@ -20,7 +20,7 @@ package api
 type EncodeS3 struct {
 	Account                string                 `yaml:"account" json:"account" doc:"tenant id for this flow collector"`
 	Endpoint               string                 `yaml:"endpoint" json:"endpoint" doc:"address of s3 server"`
-	AccessKeyId            string                 `yaml:"accessKeyId" json:"accessKeyId" doc:"username to connect to server"`
+	AccessKeyID            string                 `yaml:"accessKeyId" json:"accessKeyId" doc:"username to connect to server"`
 	SecretAccessKey        string                 `yaml:"secretAccessKey" json:"secretAccessKey" doc:"password to connect to server"`
 	Bucket                 string                 `yaml:"bucket" json:"bucket" doc:"bucket into which to store objects"`
 	WriteTimeout           Duration               `yaml:"writeTimeout,omitempty" json:"writeTimeout,omitempty" doc:"timeout (in seconds) for write operation"`
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/enum.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/enum.go
deleted file mode 100644
index 80b94c5a6872ba08e2d97838f915044601c90398..0000000000000000000000000000000000000000
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/enum.go
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (C) 2022 IBM, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package api
-
-import (
-	"log"
-	"reflect"
-)
-
-type enums struct {
-	MetricEncodeOperationEnum     MetricEncodeOperationEnum
-	MetricEncodeFilterTypeEnum    MetricEncodeFilterTypeEnum
-	TransformNetworkOperationEnum TransformNetworkOperationEnum
-	TransformFilterOperationEnum  TransformFilterOperationEnum
-	TransformGenericOperationEnum TransformGenericOperationEnum
-	KafkaEncodeBalancerEnum       KafkaEncodeBalancerEnum
-	SASLTypeEnum                  SASLTypeEnum
-	ConnTrackOperationEnum        ConnTrackOperationEnum
-	ConnTrackOutputRecordTypeEnum ConnTrackOutputRecordTypeEnum
-	DecoderEnum                   DecoderEnum
-	FilterOperationEnum           FilterOperationEnum
-}
-
-type enumNameCacheKey struct {
-	enum      interface{}
-	operation string
-}
-
-var enumNamesCache = map[enumNameCacheKey]string{}
-
-func init() {
-	populateEnumCache()
-}
-
-func populateEnumCache() {
-	enumStruct := enums{}
-	e := reflect.ValueOf(&enumStruct).Elem()
-	for i := 0; i < e.NumField(); i++ {
-		eType := e.Type().Field(i).Type
-		eValue := e.Field(i).Interface()
-		for j := 0; j < eType.NumField(); j++ {
-			fName := eType.Field(j).Name
-			key := enumNameCacheKey{enum: eValue, operation: fName}
-			d := reflect.ValueOf(eValue)
-			field, _ := d.Type().FieldByName(fName)
-			tag := field.Tag.Get(TagYaml)
-			enumNamesCache[key] = tag
-		}
-	}
-}
-
-// GetEnumName gets the name of an enum value from the representing enum struct based on `TagYaml` tag.
-func GetEnumName(enum interface{}, operation string) string {
-	key := enumNameCacheKey{enum: enum, operation: operation}
-	cachedValue, found := enumNamesCache[key]
-	if found {
-		return cachedValue
-	} else {
-		log.Panicf("can't find name '%s' in enum %v", operation, enum)
-		return ""
-	}
-}
-
-// GetEnumReflectionTypeByFieldName gets the enum struct `reflection Type` from the name of the struct (using fields from `enums{}` struct).
-func GetEnumReflectionTypeByFieldName(enumName string) reflect.Type {
-	d := reflect.ValueOf(enums{})
-	field, found := d.Type().FieldByName(enumName)
-	if !found {
-		log.Panicf("can't find enumName %s in enums", enumName)
-		return nil
-	}
-
-	return field.Type
-}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/extract_timebased.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/extract_timebased.go
index 23d749974ef9b27d6467010cefabff89cd81a959..8d9605e628b3a8a1edf6094806caa43952fbe223 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/extract_timebased.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/extract_timebased.go
@@ -17,31 +17,30 @@
 
 package api
 
-type FilterOperationEnum struct {
-	FilterOperationSum  string `yaml:"sum" json:"sum" doc:"set output field to sum of parameters fields in the time window"`
-	FilterOperationAvg  string `yaml:"avg" json:"avg" doc:"set output field to average of parameters fields in the time window"`
-	FilterOperationMin  string `yaml:"min" json:"min" doc:"set output field to minimum of parameters fields in the time window"`
-	FilterOperationMax  string `yaml:"max" json:"max" doc:"set output field to maximum of parameters fields in the time window"`
-	FilterOperationCnt  string `yaml:"count" json:"count" doc:"set output field to number of flows registered in the time window"`
-	FilterOperationLast string `yaml:"last" json:"last" doc:"set output field to last of parameters fields in the time window"`
-	FilterOperationDiff string `yaml:"diff" json:"diff" doc:"set output field to the difference of the first and last parameters fields in the time window"`
-}
+type FilterOperationEnum string
 
-func FilterOperationName(operation string) string {
-	return GetEnumName(FilterOperationEnum{}, operation)
-}
+const (
+	// For doc generation, enum definitions must match format `Constant Type = "value" // doc`
+	FilterOperationSum  FilterOperationEnum = "sum"   // set output field to sum of parameters fields in the time window
+	FilterOperationAvg  FilterOperationEnum = "avg"   // set output field to average of parameters fields in the time window
+	FilterOperationMin  FilterOperationEnum = "min"   // set output field to minimum of parameters fields in the time window
+	FilterOperationMax  FilterOperationEnum = "max"   // set output field to maximum of parameters fields in the time window
+	FilterOperationCnt  FilterOperationEnum = "count" // set output field to number of flows registered in the time window
+	FilterOperationLast FilterOperationEnum = "last"  // set output field to last of parameters fields in the time window
+	FilterOperationDiff FilterOperationEnum = "diff"  // set output field to the difference of the first and last parameters fields in the time window
+)
 
 type ExtractTimebased struct {
 	Rules []TimebasedFilterRule `yaml:"rules,omitempty" json:"rules,omitempty" doc:"list of filter rules, each includes:"`
 }
 
 type TimebasedFilterRule struct {
-	Name          string   `yaml:"name,omitempty" json:"name,omitempty" doc:"description of filter result"`
-	IndexKey      string   `yaml:"indexKey,omitempty" json:"indexKey,omitempty" doc:"internal field to index TopK. Deprecated, use indexKeys instead"`
-	IndexKeys     []string `yaml:"indexKeys,omitempty" json:"indexKeys,omitempty" doc:"internal fields to index TopK"`
-	OperationType string   `yaml:"operationType,omitempty" json:"operationType,omitempty" enum:"FilterOperationEnum" doc:"sum, min, max, avg, count, last or diff"`
-	OperationKey  string   `yaml:"operationKey,omitempty" json:"operationKey,omitempty" doc:"internal field on which to perform the operation"`
-	TopK          int      `yaml:"topK,omitempty" json:"topK,omitempty" doc:"number of highest incidence to report (default - report all)"`
-	Reversed      bool     `yaml:"reversed,omitempty" json:"reversed,omitempty" doc:"report lowest incidence instead of highest (default - false)"`
-	TimeInterval  Duration `yaml:"timeInterval,omitempty" json:"timeInterval,omitempty" doc:"time duration of data to use to compute the metric"`
+	Name          string              `yaml:"name,omitempty" json:"name,omitempty" doc:"description of filter result"`
+	IndexKey      string              `yaml:"indexKey,omitempty" json:"indexKey,omitempty" doc:"internal field to index TopK. Deprecated, use indexKeys instead"`
+	IndexKeys     []string            `yaml:"indexKeys,omitempty" json:"indexKeys,omitempty" doc:"internal fields to index TopK"`
+	OperationType FilterOperationEnum `yaml:"operationType,omitempty" json:"operationType,omitempty" doc:"(enum) sum, min, max, avg, count, last or diff"`
+	OperationKey  string              `yaml:"operationKey,omitempty" json:"operationKey,omitempty" doc:"internal field on which to perform the operation"`
+	TopK          int                 `yaml:"topK,omitempty" json:"topK,omitempty" doc:"number of highest incidence to report (default - report all)"`
+	Reversed      bool                `yaml:"reversed,omitempty" json:"reversed,omitempty" doc:"report lowest incidence instead of highest (default - false)"`
+	TimeInterval  Duration            `yaml:"timeInterval,omitempty" json:"timeInterval,omitempty" doc:"time duration of data to use to compute the metric"`
 }
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/ingest_kafka.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/ingest_kafka.go
index a9823c0325032d30bfd71fd7b66a1cae5e91ba43..0234b21c3b6ae2d873a4625a6a1ad0b24cacfa0a 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/ingest_kafka.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/ingest_kafka.go
@@ -20,7 +20,7 @@ package api
 type IngestKafka struct {
 	Brokers           []string    `yaml:"brokers,omitempty" json:"brokers,omitempty" doc:"list of kafka broker addresses"`
 	Topic             string      `yaml:"topic,omitempty" json:"topic,omitempty" doc:"kafka topic to listen on"`
-	GroupId           string      `yaml:"groupid,omitempty" json:"groupid,omitempty" doc:"separate groupid for each consumer on specified topic"`
+	GroupID           string      `yaml:"groupid,omitempty" json:"groupid,omitempty" doc:"separate groupid for each consumer on specified topic"`
 	GroupBalancers    []string    `yaml:"groupBalancers,omitempty" json:"groupBalancers,omitempty" doc:"list of balancing strategies (range, roundRobin, rackAffinity)"`
 	StartOffset       string      `yaml:"startOffset,omitempty" json:"startOffset,omitempty" doc:"FirstOffset (least recent - default) or LastOffset (most recent) offset available for a partition"`
 	BatchReadTimeout  int64       `yaml:"batchReadTimeout,omitempty" json:"batchReadTimeout,omitempty" doc:"how often (in milliseconds) to process input"`
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/sasl.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/sasl.go
index e5717b0e64c6ae828ba814217f8def7036d7e607..b00544f30a54c282a108d5635f4675a729872cce 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/sasl.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/sasl.go
@@ -1,16 +1,15 @@
 package api
 
 type SASLConfig struct {
-	Type             string
-	ClientIDPath     string `yaml:"clientIDPath,omitempty" json:"clientIDPath,omitempty" doc:"path to the client ID / SASL username"`
-	ClientSecretPath string `yaml:"clientSecretPath,omitempty" json:"clientSecretPath,omitempty" doc:"path to the client secret / SASL password"`
+	Type             SASLTypeEnum `yaml:"type,omitempty" json:"type,omitempty" doc:"SASL type"`
+	ClientIDPath     string       `yaml:"clientIDPath,omitempty" json:"clientIDPath,omitempty" doc:"path to the client ID / SASL username"`
+	ClientSecretPath string       `yaml:"clientSecretPath,omitempty" json:"clientSecretPath,omitempty" doc:"path to the client secret / SASL password"`
 }
 
-type SASLTypeEnum struct {
-	Plain       string `yaml:"plain" json:"plain" doc:"Plain SASL"`
-	ScramSHA512 string `yaml:"scramSHA512" json:"scramSHA512" doc:"SCRAM/SHA512 SASL"`
-}
+type SASLTypeEnum string
 
-func SASLTypeName(operation string) string {
-	return GetEnumName(SASLTypeEnum{}, operation)
-}
+const (
+	// For doc generation, enum definitions must match format `Constant Type = "value" // doc`
+	SASLPlain       SASLTypeEnum = "plain"       // Plain SASL
+	SASLScramSHA512 SASLTypeEnum = "scramSHA512" // SCRAM/SHA512 SASL
+)
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/tls.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/tls.go
index 8e57e3273b641231eaca783af062c804502a4829..5d8abfbf965b7c8e460856c6b8098f272616fc6a 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/tls.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/tls.go
@@ -58,7 +58,7 @@ func (c *ClientTLS) Build() (*tls.Config, error) {
 			}
 			tlsConfig.Certificates = []tls.Certificate{pair}
 		} else if c.UserCertPath != "" || c.UserKeyPath != "" {
-			return nil, errors.New("userCertPath and userKeyPath must be both present or both absent.")
+			return nil, errors.New("userCertPath and userKeyPath must be both present or both absent")
 		}
 		return tlsConfig, nil
 	}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/transform_filter.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/transform_filter.go
index 8029df23ddfe085a6a846923dd35052cc3827beb..f2b995fde956dfa6f421f98fa1e80080f9b5b733 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/transform_filter.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/transform_filter.go
@@ -21,29 +21,46 @@ type TransformFilter struct {
 	Rules []TransformFilterRule `yaml:"rules,omitempty" json:"rules,omitempty" doc:"list of filter rules, each includes:"`
 }
 
-type TransformFilterOperationEnum struct {
-	RemoveField              string `yaml:"remove_field" json:"remove_field" doc:"removes the field from the entry"`
-	RemoveEntryIfExists      string `yaml:"remove_entry_if_exists" json:"remove_entry_if_exists" doc:"removes the entry if the field exists"`
-	RemoveEntryIfDoesntExist string `yaml:"remove_entry_if_doesnt_exist" json:"remove_entry_if_doesnt_exist" doc:"removes the entry if the field does not exist"`
-	RemoveEntryIfEqual       string `yaml:"remove_entry_if_equal" json:"remove_entry_if_equal" doc:"removes the entry if the field value equals specified value"`
-	RemoveEntryIfNotEqual    string `yaml:"remove_entry_if_not_equal" json:"remove_entry_if_not_equal" doc:"removes the entry if the field value does not equal specified value"`
-	AddField                 string `yaml:"add_field" json:"add_field" doc:"adds (input) field to the entry; overrides previous value if present (key=input, value=value)"`
-	AddFieldIfDoesntExist    string `yaml:"add_field_if_doesnt_exist" json:"add_field_if_doesnt_exist" doc:"adds a field to the entry if the field does not exist"`
-	AddFieldIf               string `yaml:"add_field_if" json:"add_field_if" doc:"add output field set to assignee if input field satisfies criteria from parameters field"`
-	AddRegExIf               string `yaml:"add_regex_if" json:"add_regex_if" doc:"add output field if input field satisfies regex pattern from parameters field"`
-	AddLabel                 string `yaml:"add_label" json:"add_label" doc:"add (input) field to list of labels with value taken from Value field (key=input, value=value)"`
-	AddLabelIf               string `yaml:"add_label_if" json:"add_label_if" doc:"add output field to list of labels with value taken from assignee field if input field satisfies criteria from parameters field"`
+type TransformFilterEnum string
+
+const (
+	// For doc generation, enum definitions must match format `Constant Type = "value" // doc`
+	RemoveField              TransformFilterEnum = "remove_field"                 // removes the field from the entry
+	RemoveEntryIfExists      TransformFilterEnum = "remove_entry_if_exists"       // removes the entry if the field exists
+	RemoveEntryIfDoesntExist TransformFilterEnum = "remove_entry_if_doesnt_exist" // removes the entry if the field does not exist
+	RemoveEntryIfEqual       TransformFilterEnum = "remove_entry_if_equal"        // removes the entry if the field value equals specified value
+	RemoveEntryIfNotEqual    TransformFilterEnum = "remove_entry_if_not_equal"    // removes the entry if the field value does not equal specified value
+	AddField                 TransformFilterEnum = "add_field"                    // adds (input) field to the entry; overrides previous value if present (key=input, value=value)
+	AddFieldIfDoesntExist    TransformFilterEnum = "add_field_if_doesnt_exist"    // adds a field to the entry if the field does not exist
+	AddFieldIf               TransformFilterEnum = "add_field_if"                 // add output field set to assignee if input field satisfies criteria from parameters field
+	AddRegExIf               TransformFilterEnum = "add_regex_if"                 // add output field if input field satisfies regex pattern from parameters field
+	AddLabel                 TransformFilterEnum = "add_label"                    // add (input) field to list of labels with value taken from Value field (key=input, value=value)
+	AddLabelIf               TransformFilterEnum = "add_label_if"                 // add output field to list of labels with value taken from assignee field if input field satisfies criteria from parameters field
+)
+
+type TransformFilterRule struct {
+	Type                     TransformFilterEnum              `yaml:"type,omitempty" json:"type,omitempty" doc:"(enum) one of the following:"`
+	RemoveField              *TransformFilterGenericRule      `yaml:"removeField,omitempty" json:"removeField,omitempty" doc:"configuration for remove_field rule"`
+	RemoveEntryIfExists      *TransformFilterGenericRule      `yaml:"removeEntryIfExists,omitempty" json:"removeEntryIfExists,omitempty" doc:"configuration for remove_entry_if_exists rule"`
+	RemoveEntryIfDoesntExist *TransformFilterGenericRule      `yaml:"removeEntryIfDoesntExist,omitempty" json:"removeEntryIfDoesntExist,omitempty" doc:"configuration for remove_entry_if_doesnt_exist rule"`
+	RemoveEntryIfEqual       *TransformFilterGenericRule      `yaml:"removeEntryIfEqual,omitempty" json:"removeEntryIfEqual,omitempty" doc:"configuration for remove_entry_if_equal rule"`
+	RemoveEntryIfNotEqual    *TransformFilterGenericRule      `yaml:"removeEntryIfNotEqual,omitempty" json:"removeEntryIfNotEqual,omitempty" doc:"configuration for remove_entry_if_not_equal rule"`
+	AddField                 *TransformFilterGenericRule      `yaml:"addField,omitempty" json:"addField,omitempty" doc:"configuration for add_field rule"`
+	AddFieldIfDoesntExist    *TransformFilterGenericRule      `yaml:"addFieldIfDoesntExist,omitempty" json:"addFieldIfDoesntExist,omitempty" doc:"configuration for add_field_if_doesnt_exist rule"`
+	AddFieldIf               *TransformFilterRuleWithAssignee `yaml:"addFieldIf,omitempty" json:"addFieldIf,omitempty" doc:"configuration for add_field_if rule"`
+	AddRegExIf               *TransformFilterRuleWithAssignee `yaml:"addRegexIf,omitempty" json:"addRegexIf,omitempty" doc:"configuration for add_regex_if rule"`
+	AddLabel                 *TransformFilterGenericRule      `yaml:"addLabel,omitempty" json:"addLabel,omitempty" doc:"configuration for add_label rule"`
+	AddLabelIf               *TransformFilterRuleWithAssignee `yaml:"addLabelIf,omitempty" json:"addLabelIf,omitempty" doc:"configuration for add_label_if rule"`
 }
 
-func TransformFilterOperationName(operation string) string {
-	return GetEnumName(TransformFilterOperationEnum{}, operation)
+type TransformFilterGenericRule struct {
+	Input string      `yaml:"input,omitempty" json:"input,omitempty" doc:"entry input field"`
+	Value interface{} `yaml:"value,omitempty" json:"value,omitempty" doc:"specified value of input field:"`
 }
 
-type TransformFilterRule struct {
-	Input      string      `yaml:"input,omitempty" json:"input,omitempty" doc:"entry input field"`
-	Output     string      `yaml:"output,omitempty" json:"output,omitempty" doc:"entry output field"`
-	Type       string      `yaml:"type,omitempty" json:"type,omitempty" enum:"TransformFilterOperationEnum" doc:"one of the following:"`
-	Value      interface{} `yaml:"value,omitempty" json:"value,omitempty" doc:"specified value of input field:"`
-	Parameters string      `yaml:"parameters,omitempty" json:"parameters,omitempty" doc:"parameters specific to type"`
-	Assignee   string      `yaml:"assignee,omitempty" json:"assignee,omitempty" doc:"value needs to assign to output field"`
+type TransformFilterRuleWithAssignee struct {
+	Input      string `yaml:"input,omitempty" json:"input,omitempty" doc:"entry input field"`
+	Output     string `yaml:"output,omitempty" json:"output,omitempty" doc:"entry output field"`
+	Parameters string `yaml:"parameters,omitempty" json:"parameters,omitempty" doc:"parameters specific to type"`
+	Assignee   string `yaml:"assignee,omitempty" json:"assignee,omitempty" doc:"value needs to assign to output field"`
 }
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/transform_generic.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/transform_generic.go
index d03e46cc032488c7c52499bef304598403fdd032..c4b94a2b553352508092f3dd02ff89ac7ffc473b 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/transform_generic.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/transform_generic.go
@@ -18,18 +18,17 @@
 package api
 
 type TransformGeneric struct {
-	Policy string                 `yaml:"policy,omitempty" json:"policy,omitempty" enum:"TransformGenericOperationEnum" doc:"key replacement policy; may be one of the following:"`
-	Rules  []GenericTransformRule `yaml:"rules,omitempty" json:"rules,omitempty" doc:"list of transform rules, each includes:"`
+	Policy TransformGenericOperationEnum `yaml:"policy,omitempty" json:"policy,omitempty" doc:"(enum) key replacement policy; may be one of the following:"`
+	Rules  []GenericTransformRule        `yaml:"rules,omitempty" json:"rules,omitempty" doc:"list of transform rules, each includes:"`
 }
 
-type TransformGenericOperationEnum struct {
-	PreserveOriginalKeys string `yaml:"preserve_original_keys" json:"preserve_original_keys" doc:"adds new keys in addition to existing keys (default)"`
-	ReplaceKeys          string `yaml:"replace_keys" json:"replace_keys" doc:"removes all old keys and uses only the new keys"`
-}
+type TransformGenericOperationEnum string
 
-func TransformGenericOperationName(operation string) string {
-	return GetEnumName(TransformGenericOperationEnum{}, operation)
-}
+const (
+	// For doc generation, enum definitions must match format `Constant Type = "value" // doc`
+	PreserveOriginalKeys TransformGenericOperationEnum = "preserve_original_keys" // adds new keys in addition to existing keys (default)
+	ReplaceKeys          TransformGenericOperationEnum = "replace_keys"           // removes all old keys and uses only the new keys
+)
 
 type GenericTransformRule struct {
 	Input      string `yaml:"input,omitempty" json:"input,omitempty" doc:"entry input field"`
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/transform_network.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/transform_network.go
index 1156c627675c627caff066e6c134a3b88cd3a86d..012a6cbd5526d1c51d3f43386e3adfc1b1957ba3 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/transform_network.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/transform_network.go
@@ -38,38 +38,27 @@ func (tn *TransformNetwork) GetServiceFiles() (string, string) {
 	return p, s
 }
 
+type TransformNetworkOperationEnum string
+
 const (
-	OpAddSubnet            = "add_subnet"
-	OpAddLocation          = "add_location"
-	OpAddService           = "add_service"
-	OpAddKubernetes        = "add_kubernetes"
-	OpAddKubernetesInfra   = "add_kubernetes_infra"
-	OpReinterpretDirection = "reinterpret_direction"
-	OpAddIPCategory        = "add_ip_category"
+	// For doc generation, enum definitions must match format `Constant Type = "value" // doc`
+	NetworkAddSubnet            TransformNetworkOperationEnum = "add_subnet"            // add output subnet field from input field and prefix length from parameters field
+	NetworkAddLocation          TransformNetworkOperationEnum = "add_location"          // add output location fields from input
+	NetworkAddService           TransformNetworkOperationEnum = "add_service"           // add output network service field from input port and parameters protocol field
+	NetworkAddKubernetes        TransformNetworkOperationEnum = "add_kubernetes"        // add output kubernetes fields from input
+	NetworkAddKubernetesInfra   TransformNetworkOperationEnum = "add_kubernetes_infra"  // add output kubernetes isInfra field from input
+	NetworkReinterpretDirection TransformNetworkOperationEnum = "reinterpret_direction" // reinterpret flow direction at the node level (instead of net interface), to ease the deduplication process
+	NetworkAddIPCategory        TransformNetworkOperationEnum = "add_ip_category"       // categorize IPs based on known subnets configuration
 )
 
-type TransformNetworkOperationEnum struct {
-	AddSubnet            string `yaml:"add_subnet" json:"add_subnet" doc:"add output subnet field from input field and prefix length from parameters field"`
-	AddLocation          string `yaml:"add_location" json:"add_location" doc:"add output location fields from input"`
-	AddService           string `yaml:"add_service" json:"add_service" doc:"add output network service field from input port and parameters protocol field"`
-	AddKubernetes        string `yaml:"add_kubernetes" json:"add_kubernetes" doc:"add output kubernetes fields from input"`
-	AddKubernetesInfra   string `yaml:"add_kubernetes_infra" json:"add_kubernetes_infra" doc:"add output kubernetes isInfra field from input"`
-	ReinterpretDirection string `yaml:"reinterpret_direction" json:"reinterpret_direction" doc:"reinterpret flow direction at the node level (instead of net interface), to ease the deduplication process"`
-	AddIPCategory        string `yaml:"add_ip_category" json:"add_ip_category" doc:"categorize IPs based on known subnets configuration"`
-}
-
-func TransformNetworkOperationName(operation string) string {
-	return GetEnumName(TransformNetworkOperationEnum{}, operation)
-}
-
 type NetworkTransformRule struct {
-	Type            string                 `yaml:"type,omitempty" json:"type,omitempty" enum:"TransformNetworkOperationEnum" doc:"one of the following:"`
-	KubernetesInfra *K8sInfraRule          `yaml:"kubernetes_infra,omitempty" json:"kubernetes_infra,omitempty" doc:"Kubernetes infra rule configuration"`
-	Kubernetes      *K8sRule               `yaml:"kubernetes,omitempty" json:"kubernetes,omitempty" doc:"Kubernetes rule configuration"`
-	AddSubnet       *NetworkAddSubnetRule  `yaml:"add_subnet,omitempty" json:"add_subnet,omitempty" doc:"Add subnet rule configuration"`
-	AddLocation     *NetworkGenericRule    `yaml:"add_location,omitempty" json:"add_location,omitempty" doc:"Add location rule configuration"`
-	AddIPCategory   *NetworkGenericRule    `yaml:"add_ip_category,omitempty" json:"add_ip_category,omitempty" doc:"Add ip category rule configuration"`
-	AddService      *NetworkAddServiceRule `yaml:"add_service,omitempty" json:"add_service,omitempty" doc:"Add service rule configuration"`
+	Type            TransformNetworkOperationEnum `yaml:"type,omitempty" json:"type,omitempty" doc:"(enum) one of the following:"`
+	KubernetesInfra *K8sInfraRule                 `yaml:"kubernetes_infra,omitempty" json:"kubernetes_infra,omitempty" doc:"Kubernetes infra rule configuration"`
+	Kubernetes      *K8sRule                      `yaml:"kubernetes,omitempty" json:"kubernetes,omitempty" doc:"Kubernetes rule configuration"`
+	AddSubnet       *NetworkAddSubnetRule         `yaml:"add_subnet,omitempty" json:"add_subnet,omitempty" doc:"Add subnet rule configuration"`
+	AddLocation     *NetworkGenericRule           `yaml:"add_location,omitempty" json:"add_location,omitempty" doc:"Add location rule configuration"`
+	AddIPCategory   *NetworkGenericRule           `yaml:"add_ip_category,omitempty" json:"add_ip_category,omitempty" doc:"Add ip category rule configuration"`
+	AddService      *NetworkAddServiceRule        `yaml:"add_service,omitempty" json:"add_service,omitempty" doc:"Add service rule configuration"`
 }
 
 type K8sInfraRule struct {
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/write_loki.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/write_loki.go
index 2f5de699239186c88fed81d5dd44106b92544d91..61885751ea435b66f57e2da5de96eb0198e032d1 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/write_loki.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/write_loki.go
@@ -73,18 +73,18 @@ func (w *WriteLoki) SetDefaults() {
 	}
 }
 
-func (wl *WriteLoki) Validate() error {
-	if wl == nil {
+func (w *WriteLoki) Validate() error {
+	if w == nil {
 		return errors.New("you must provide a configuration")
 	}
-	if wl.TimestampScale == "" {
+	if w.TimestampScale == "" {
 		return errors.New("timestampUnit must be a valid Duration > 0 (e.g. 1m, 1s or 1ms)")
 	}
-	if wl.URL == "" {
+	if w.URL == "" {
 		return errors.New("url can't be empty")
 	}
-	if wl.BatchSize <= 0 {
-		return fmt.Errorf("invalid batchSize: %v. Required > 0", wl.BatchSize)
+	if w.BatchSize <= 0 {
+		return fmt.Errorf("invalid batchSize: %v. Required > 0", w.BatchSize)
 	}
 	return nil
 }
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/config/config.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/config/config.go
index 25c08c26999b103b11e11fc0cb5e2f9282efa097..1cf8ca896d35b97f03c4eaecd2eda9be84fc7a31 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/config/config.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/config/config.go
@@ -34,6 +34,9 @@ type Options struct {
 	Profile         Profile
 }
 
+// (nolint => needs refactoring)
+//
+//nolint:revive
 type ConfigFileStruct struct {
 	LogLevel        string          `yaml:"log-level,omitempty" json:"log-level,omitempty"`
 	MetricsSettings MetricsSettings `yaml:"metricsSettings,omitempty" json:"metricsSettings,omitempty"`
@@ -133,18 +136,18 @@ type Write struct {
 }
 
 // ParseConfig creates the internal unmarshalled representation from the Pipeline and Parameters json
-func ParseConfig(opts Options) (ConfigFileStruct, error) {
+func ParseConfig(opts *Options) (ConfigFileStruct, error) {
 	out := ConfigFileStruct{}
 
 	logrus.Debugf("opts.PipeLine = %v ", opts.PipeLine)
-	err := JsonUnmarshalStrict([]byte(opts.PipeLine), &out.Pipeline)
+	err := JSONUnmarshalStrict([]byte(opts.PipeLine), &out.Pipeline)
 	if err != nil {
 		logrus.Errorf("error when parsing pipeline: %v", err)
 		return out, err
 	}
 	logrus.Debugf("stages = %v ", out.Pipeline)
 
-	err = JsonUnmarshalStrict([]byte(opts.Parameters), &out.Parameters)
+	err = JSONUnmarshalStrict([]byte(opts.Parameters), &out.Parameters)
 	if err != nil {
 		logrus.Errorf("error when parsing pipeline parameters: %v", err)
 		return out, err
@@ -152,7 +155,7 @@ func ParseConfig(opts Options) (ConfigFileStruct, error) {
 	logrus.Debugf("params = %v ", out.Parameters)
 
 	if opts.MetricsSettings != "" {
-		err = JsonUnmarshalStrict([]byte(opts.MetricsSettings), &out.MetricsSettings)
+		err = JSONUnmarshalStrict([]byte(opts.MetricsSettings), &out.MetricsSettings)
 		if err != nil {
 			logrus.Errorf("error when parsing global metrics settings: %v", err)
 			return out, err
@@ -168,7 +171,7 @@ func ParseConfig(opts Options) (ConfigFileStruct, error) {
 // JsonUnmarshalStrict is like Unmarshal except that any fields that are found
 // in the data that do not have corresponding struct members, or mapping
 // keys that are duplicates, will result in an error.
-func JsonUnmarshalStrict(data []byte, v interface{}) error {
+func JSONUnmarshalStrict(data []byte, v interface{}) error {
 	dec := json.NewDecoder(bytes.NewReader(data))
 	dec.DisallowUnknownFields()
 	return dec.Decode(v)
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/config/pipeline_builder.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/config/pipeline_builder.go
index 796da0631073a5117174e1730c5c270caeff2c0a..1fea8608b478f52cb6b4a8234d98752826b8c3d1 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/config/pipeline_builder.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/config/pipeline_builder.go
@@ -61,10 +61,12 @@ func NewPipeline(name string, ingest *Ingest) (PipelineBuilderStage, error) {
 	if ingest.Kafka != nil {
 		return NewKafkaPipeline(name, *ingest.Kafka), nil
 	}
-	return PipelineBuilderStage{}, errors.New("Missing ingest params")
+	return PipelineBuilderStage{}, errors.New("missing ingest params")
 }
 
 // NewCollectorPipeline creates a new pipeline from an `IngestCollector` initial stage (listening for NetFlows / IPFIX)
+//
+//nolint:golint,gocritic
 func NewCollectorPipeline(name string, ingest api.IngestCollector) PipelineBuilderStage {
 	p := pipeline{
 		stages: []Stage{{Name: name}},
@@ -74,6 +76,8 @@ func NewCollectorPipeline(name string, ingest api.IngestCollector) PipelineBuild
 }
 
 // NewGRPCPipeline creates a new pipeline from an `IngestGRPCProto` initial stage (listening for NetObserv's eBPF agent protobuf)
+//
+//nolint:golint,gocritic
 func NewGRPCPipeline(name string, ingest api.IngestGRPCProto) PipelineBuilderStage {
 	p := pipeline{
 		stages: []Stage{{Name: name}},
@@ -83,6 +87,8 @@ func NewGRPCPipeline(name string, ingest api.IngestGRPCProto) PipelineBuilderSta
 }
 
 // NewKafkaPipeline creates a new pipeline from an `IngestKafka` initial stage (listening for flow events on Kafka)
+//
+//nolint:golint,gocritic
 func NewKafkaPipeline(name string, ingest api.IngestKafka) PipelineBuilderStage {
 	p := pipeline{
 		stages: []Stage{{Name: name}},
@@ -127,11 +133,15 @@ func (b *PipelineBuilderStage) TransformFilter(name string, filter api.Transform
 }
 
 // TransformNetwork chains the current stage with a TransformNetwork stage and returns that new stage
+//
+//nolint:golint,gocritic
 func (b *PipelineBuilderStage) TransformNetwork(name string, nw api.TransformNetwork) PipelineBuilderStage {
 	return b.next(name, NewTransformNetworkParams(name, nw))
 }
 
 // ConnTrack chains the current stage with a ConnTrack stage and returns that new stage
+//
+//nolint:golint,gocritic
 func (b *PipelineBuilderStage) ConnTrack(name string, ct api.ConnTrack) PipelineBuilderStage {
 	return b.next(name, NewConnTrackParams(name, ct))
 }
@@ -142,11 +152,15 @@ func (b *PipelineBuilderStage) EncodePrometheus(name string, prom api.PromEncode
 }
 
 // EncodeKafka chains the current stage with an EncodeKafka stage (writing to a Kafka topic) and returns that new stage
+//
+//nolint:golint,gocritic
 func (b *PipelineBuilderStage) EncodeKafka(name string, kafka api.EncodeKafka) PipelineBuilderStage {
 	return b.next(name, NewEncodeKafkaParams(name, kafka))
 }
 
 // EncodeS3 chains the current stage with an EncodeS3 stage (writing to s3 bucket) and returns that new stage
+//
+//nolint:golint,gocritic
 func (b *PipelineBuilderStage) EncodeS3(name string, s3 api.EncodeS3) PipelineBuilderStage {
 	return b.next(name, NewEncodeS3Params(name, s3))
 }
@@ -157,6 +171,8 @@ func (b *PipelineBuilderStage) WriteStdout(name string, stdout api.WriteStdout)
 }
 
 // WriteLoki chains the current stage with a WriteLoki stage and returns that new stage
+//
+//nolint:golint,gocritic
 func (b *PipelineBuilderStage) WriteLoki(name string, loki api.WriteLoki) PipelineBuilderStage {
 	return b.next(name, NewWriteLokiParams(name, loki))
 }
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/config/stage_params.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/config/stage_params.go
index 842fe7c5661e5c7ba223d54637ebe12602803ed4..6fcbe6daaf481da137e03e6d6719593e48750d46 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/config/stage_params.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/config/stage_params.go
@@ -29,6 +29,7 @@ func NewGRPCParams(name string, ingest api.IngestGRPCProto) StageParam {
 	return StageParam{Name: name, Ingest: &Ingest{Type: api.GRPCType, GRPC: &ingest}}
 }
 
+//nolint:golint,gocritic
 func NewKafkaParams(name string, ingest api.IngestKafka) StageParam {
 	return StageParam{Name: name, Ingest: &Ingest{Type: api.KafkaType, Kafka: &ingest}}
 }
@@ -45,10 +46,12 @@ func NewTransformFilterParams(name string, filter api.TransformFilter) StagePara
 	return StageParam{Name: name, Transform: &Transform{Type: api.FilterType, Filter: &filter}}
 }
 
+//nolint:golint,gocritic
 func NewTransformNetworkParams(name string, nw api.TransformNetwork) StageParam {
 	return StageParam{Name: name, Transform: &Transform{Type: api.NetworkType, Network: &nw}}
 }
 
+//nolint:golint,gocritic
 func NewConnTrackParams(name string, ct api.ConnTrack) StageParam {
 	return StageParam{Name: name, Extract: &Extract{Type: api.ConnTrackType, ConnTrack: &ct}}
 }
@@ -61,10 +64,12 @@ func NewEncodePrometheusParams(name string, prom api.PromEncode) StageParam {
 	return StageParam{Name: name, Encode: &Encode{Type: api.PromType, Prom: &prom}}
 }
 
+//nolint:golint,gocritic
 func NewEncodeKafkaParams(name string, kafka api.EncodeKafka) StageParam {
 	return StageParam{Name: name, Encode: &Encode{Type: api.KafkaType, Kafka: &kafka}}
 }
 
+//nolint:golint,gocritic
 func NewEncodeS3Params(name string, s3 api.EncodeS3) StageParam {
 	return StageParam{Name: name, Encode: &Encode{Type: api.S3Type, S3: &s3}}
 }
@@ -73,6 +78,7 @@ func NewWriteStdoutParams(name string, stdout api.WriteStdout) StageParam {
 	return StageParam{Name: name, Write: &Write{Type: api.StdoutType, Stdout: &stdout}}
 }
 
+//nolint:golint,gocritic
 func NewWriteLokiParams(name string, loki api.WriteLoki) StageParam {
 	return StageParam{Name: name, Write: &Write{Type: api.LokiType, Loki: &loki}}
 }
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/metrics.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/metrics.go
index 89b42617ec06c9dceb7e3371eab4feebe0b0c9c6..44488f232bae96a940a3864e24cb11542ac93472 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/metrics.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/metrics.go
@@ -18,6 +18,7 @@
 package operational
 
 import (
+	"errors"
 	"fmt"
 	"sort"
 	"strings"
@@ -111,7 +112,8 @@ func NewMetrics(settings *config.MetricsSettings) *Metrics {
 func (o *Metrics) register(c prometheus.Collector, name string) {
 	err := prometheus.DefaultRegisterer.Register(c)
 	if err != nil {
-		if _, ok := err.(prometheus.AlreadyRegisteredError); ok {
+		var castErr prometheus.AlreadyRegisteredError
+		if errors.As(err, &castErr) {
 			logrus.Warningf("metrics registration error [%s]: %v", name, err)
 		} else if o.settings.NoPanic {
 			logrus.Errorf("metrics registration error [%s]: %v", name, err)
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode/decode.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode/decode.go
index 6c6cb317953787499107471aaaccccabf2cf0f2b..d85511e613cdabc70963ecc22cce22ae41d7d6c4 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode/decode.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode/decode.go
@@ -31,9 +31,9 @@ type Decoder interface {
 
 func GetDecoder(params api.Decoder) (Decoder, error) {
 	switch params.Type {
-	case api.DecoderName("JSON"):
-		return NewDecodeJson()
-	case api.DecoderName("Protobuf"):
+	case api.DecoderJSON:
+		return NewDecodeJSON()
+	case api.DecoderProtobuf:
 		return decode.NewProtobuf()
 	}
 	panic(fmt.Sprintf("`decode` type %s not defined", params.Type))
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode/decode_json.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode/decode_json.go
index 4bffdcaf1a61b3c93f26c1a5f61a83593c3c6951..c3b7481ab4a03a852e0e5c0f4105cf208d7a50eb 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode/decode_json.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode/decode_json.go
@@ -25,14 +25,15 @@ import (
 	log "github.com/sirupsen/logrus"
 )
 
-type DecodeJson struct {
+//nolint:revive
+type DecodeJSON struct {
 }
 
 // Decode decodes input strings to a list of flow entries
-func (c *DecodeJson) Decode(line []byte) (config.GenericMap, error) {
+func (c *DecodeJSON) Decode(line []byte) (config.GenericMap, error) {
 
 	if log.IsLevelEnabled(log.DebugLevel) {
-		log.Debugf("decodeJson: line = %v", string(line))
+		log.Debugf("decodeJSON: line = %v", string(line))
 	}
 	var decodedLine map[string]interface{}
 	if err := json.Unmarshal(line, &decodedLine); err != nil {
@@ -51,8 +52,8 @@ func (c *DecodeJson) Decode(line []byte) (config.GenericMap, error) {
 	return decodedLine2, nil
 }
 
-// NewDecodeJson create a new decode
-func NewDecodeJson() (Decoder, error) {
-	log.Debugf("entering NewDecodeJson")
-	return &DecodeJson{}, nil
+// NewDecodeJSON creates a new decoder
+func NewDecodeJSON() (Decoder, error) {
+	log.Debugf("entering NewDecodeJSON")
+	return &DecodeJSON{}, nil
 }
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_kafka.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_kafka.go
index 7dcf0f24f40a5839ffd93d3a7bbbe164d881ab85..a59f76b3b8862fc1c3b42a6bb780b72c9f3b7af7 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_kafka.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_kafka.go
@@ -76,15 +76,15 @@ func NewEncodeKafka(opMetrics *operational.Metrics, params config.StageParam) (E
 
 	var balancer kafkago.Balancer
 	switch config.Balancer {
-	case api.KafkaEncodeBalancerName("RoundRobin"):
+	case api.KafkaRoundRobin:
 		balancer = &kafkago.RoundRobin{}
-	case api.KafkaEncodeBalancerName("LeastBytes"):
+	case api.KafkaLeastBytes:
 		balancer = &kafkago.LeastBytes{}
-	case api.KafkaEncodeBalancerName("Hash"):
+	case api.KafkaHash:
 		balancer = &kafkago.Hash{}
-	case api.KafkaEncodeBalancerName("Crc32"):
+	case api.KafkaCrc32:
 		balancer = &kafkago.CRC32Balancer{}
-	case api.KafkaEncodeBalancerName("Murmur2"):
+	case api.KafkaMurmur2:
 		balancer = &kafkago.Murmur2Balancer{}
 	default:
 		balancer = nil
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_prom.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_prom.go
index 9cdef8e1a6a41d533d6c5e46f8127ace5d47091b..9f84bf169e3077a36e991bf19a7810cbac70687d 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_prom.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_prom.go
@@ -30,6 +30,7 @@ import (
 
 const defaultExpiryTime = time.Duration(2 * time.Minute)
 
+//nolint:revive
 type EncodeProm struct {
 	cfg          *api.PromEncode
 	registerer   prometheus.Registerer
@@ -52,7 +53,7 @@ func (e *EncodeProm) ProcessCounter(m interface{}, labels map[string]string, val
 	return nil
 }
 
-func (e *EncodeProm) ProcessGauge(m interface{}, labels map[string]string, value float64, key string) error {
+func (e *EncodeProm) ProcessGauge(m interface{}, labels map[string]string, value float64, _ string) error {
 	gauge := m.(*prometheus.GaugeVec)
 	mm, err := gauge.GetMetricWith(labels)
 	if err != nil {
@@ -130,14 +131,15 @@ func NewEncodeProm(opMetrics *operational.Metrics, params config.StageParam) (En
 	metricCommon := NewMetricsCommonStruct(opMetrics, cfg.MaxMetrics, params.Name, expiryTime, w.Cleanup)
 	w.metricCommon = metricCommon
 
-	for _, mCfg := range cfg.Metrics {
+	for i := range cfg.Metrics {
+		mCfg := &cfg.Metrics[i]
 		fullMetricName := cfg.Prefix + mCfg.Name
 		labels := mCfg.Labels
 		log.Debugf("fullMetricName = %v", fullMetricName)
 		log.Debugf("Labels = %v", labels)
 		mInfo := CreateMetricInfo(mCfg)
 		switch mCfg.Type {
-		case api.MetricEncodeOperationName("Counter"):
+		case api.MetricCounter:
 			counter := prometheus.NewCounterVec(prometheus.CounterOpts{Name: fullMetricName, Help: ""}, labels)
 			err := registerer.Register(counter)
 			if err != nil {
@@ -145,7 +147,7 @@ func NewEncodeProm(opMetrics *operational.Metrics, params config.StageParam) (En
 				return nil, err
 			}
 			metricCommon.AddCounter(counter, mInfo)
-		case api.MetricEncodeOperationName("Gauge"):
+		case api.MetricGauge:
 			gauge := prometheus.NewGaugeVec(prometheus.GaugeOpts{Name: fullMetricName, Help: ""}, labels)
 			err := registerer.Register(gauge)
 			if err != nil {
@@ -153,7 +155,7 @@ func NewEncodeProm(opMetrics *operational.Metrics, params config.StageParam) (En
 				return nil, err
 			}
 			metricCommon.AddGauge(gauge, mInfo)
-		case api.MetricEncodeOperationName("Histogram"):
+		case api.MetricHistogram:
 			log.Debugf("buckets = %v", mCfg.Buckets)
 			hist := prometheus.NewHistogramVec(prometheus.HistogramOpts{Name: fullMetricName, Help: "", Buckets: mCfg.Buckets}, labels)
 			err := registerer.Register(hist)
@@ -162,7 +164,7 @@ func NewEncodeProm(opMetrics *operational.Metrics, params config.StageParam) (En
 				return nil, err
 			}
 			metricCommon.AddHist(hist, mInfo)
-		case api.MetricEncodeOperationName("AggHistogram"):
+		case api.MetricAggHistogram:
 			log.Debugf("buckets = %v", mCfg.Buckets)
 			hist := prometheus.NewHistogramVec(prometheus.HistogramOpts{Name: fullMetricName, Help: "", Buckets: mCfg.Buckets}, labels)
 			err := registerer.Register(hist)
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_prom_metric.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_prom_metric.go
index 618ccd2ae9f1a771ef1c04521b3d5bbf15173612..407e5515593dd9bff694a3e71847182d91a3c02b 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_prom_metric.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_prom_metric.go
@@ -14,7 +14,7 @@ type Predicate func(flow config.GenericMap) bool
 var variableExtractor, _ = regexp.Compile(`\$\(([^\)]+)\)`)
 
 type MetricInfo struct {
-	api.MetricsItem
+	*api.MetricsItem
 	FilterPredicates []Predicate
 }
 
@@ -76,17 +76,17 @@ func NotRegex(filter api.MetricsFilter) Predicate {
 
 func filterToPredicate(filter api.MetricsFilter) Predicate {
 	switch filter.Type {
-	case api.PromFilterEqual:
+	case api.MetricFilterEqual:
 		return Equal(filter)
-	case api.PromFilterNotEqual:
+	case api.MetricFilterNotEqual:
 		return NotEqual(filter)
-	case api.PromFilterPresence:
+	case api.MetricFilterPresence:
 		return Presence(filter)
-	case api.PromFilterAbsence:
+	case api.MetricFilterAbsence:
 		return Absence(filter)
-	case api.PromFilterRegex:
+	case api.MetricFilterRegex:
 		return Regex(filter)
-	case api.PromFilterNotRegex:
+	case api.MetricFilterNotRegex:
 		return NotRegex(filter)
 	}
 	// Default = Exact
@@ -118,7 +118,7 @@ func injectVars(flow config.GenericMap, filterValue string, varLookups [][]strin
 	return injected
 }
 
-func CreateMetricInfo(def api.MetricsItem) *MetricInfo {
+func CreateMetricInfo(def *api.MetricsItem) *MetricInfo {
 	mi := MetricInfo{
 		MetricsItem: def,
 	}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_s3.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_s3.go
index 408de5fb9368c465a6dd5253eec3b8aae4b8cc84..9b7bb1267e335218063f592e281b054bd2a6ecaa 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_s3.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_s3.go
@@ -52,7 +52,7 @@ type encodeS3 struct {
 	mutex             *sync.Mutex
 	expiryTime        time.Time
 	exitChan          <-chan struct{}
-	streamId          string
+	streamID          string
 	intervalStartTime time.Time
 	sequenceNumber    int64
 }
@@ -79,7 +79,7 @@ func (s *encodeS3) writeObject() error {
 	day := fmt.Sprintf("%02d", now.Day())
 	hour := fmt.Sprintf("%02d", now.Hour())
 	seq := fmt.Sprintf("%08d", s.sequenceNumber)
-	objectName := s.s3Params.Account + "/year=" + year + "/month=" + month + "/day=" + day + "/hour=" + hour + "/stream-id=" + s.streamId + "/" + seq
+	objectName := s.s3Params.Account + "/year=" + year + "/month=" + month + "/day=" + day + "/hour=" + hour + "/stream-id=" + s.streamID + "/" + seq
 	log.Debugf("S3 writeObject: objectName = %s", objectName)
 	log.Debugf("S3 writeObject: object = %v", object)
 	s.pendingEntries = s.pendingEntries[nLogs:]
@@ -163,7 +163,7 @@ func NewEncodeS3(opMetrics *operational.Metrics, params config.StageParam) (Enco
 		pendingEntries:    make([]config.GenericMap, 0),
 		expiryTime:        time.Now().Add(configParams.WriteTimeout.Duration),
 		exitChan:          utils.ExitChannel(),
-		streamId:          time.Now().Format(time.RFC3339),
+		streamID:          time.Now().Format(time.RFC3339),
 		intervalStartTime: time.Now(),
 		mutex:             &sync.Mutex{},
 	}
@@ -174,7 +174,7 @@ func NewEncodeS3(opMetrics *operational.Metrics, params config.StageParam) (Enco
 func (e *encodeS3Writer) connectS3(config *api.EncodeS3) (*minio.Client, error) {
 	// Initialize s3 client object.
 	minioOptions := minio.Options{
-		Creds:  credentials.NewStaticV4(config.AccessKeyId, config.SecretAccessKey, ""),
+		Creds:  credentials.NewStaticV4(config.AccessKeyID, config.SecretAccessKey, ""),
 		Secure: config.Secure,
 	}
 	s3Client, err := minio.New(config.Endpoint, &minioOptions)
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics_common.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics_common.go
index c90a5a2e98dc33a8ca48732cb45fa18c2ad86611..28655d95960130c3371737cc00eec19a120188cb 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics_common.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics_common.go
@@ -105,67 +105,67 @@ func (m *MetricsCommonStruct) AddAggHist(g interface{}, info *MetricInfo) {
 	m.aggHistos = append(m.aggHistos, mStruct)
 }
 
-func (e *MetricsCommonStruct) MetricCommonEncode(mci MetricsCommonInterface, metricRecord config.GenericMap) {
+func (m *MetricsCommonStruct) MetricCommonEncode(mci MetricsCommonInterface, metricRecord config.GenericMap) {
 	log.Tracef("entering MetricCommonEncode. metricRecord = %v", metricRecord)
 
 	// Process counters
-	for _, mInfo := range e.counters {
-		labels, value, _ := e.prepareMetric(mci, metricRecord, mInfo.info, mInfo.genericMetric)
+	for _, mInfo := range m.counters {
+		labels, value, _ := m.prepareMetric(mci, metricRecord, mInfo.info, mInfo.genericMetric)
 		if labels == nil {
 			continue
 		}
 		err := mci.ProcessCounter(mInfo.genericMetric, labels, value)
 		if err != nil {
 			log.Errorf("labels registering error on %s: %v", mInfo.info.Name, err)
-			e.errorsCounter.WithLabelValues("LabelsRegisteringError", mInfo.info.Name, "").Inc()
+			m.errorsCounter.WithLabelValues("LabelsRegisteringError", mInfo.info.Name, "").Inc()
 			continue
 		}
-		e.metricsProcessed.Inc()
+		m.metricsProcessed.Inc()
 	}
 
 	// Process gauges
-	for _, mInfo := range e.gauges {
-		labels, value, key := e.prepareMetric(mci, metricRecord, mInfo.info, mInfo.genericMetric)
+	for _, mInfo := range m.gauges {
+		labels, value, key := m.prepareMetric(mci, metricRecord, mInfo.info, mInfo.genericMetric)
 		if labels == nil {
 			continue
 		}
 		err := mci.ProcessGauge(mInfo.genericMetric, labels, value, key)
 		if err != nil {
 			log.Errorf("labels registering error on %s: %v", mInfo.info.Name, err)
-			e.errorsCounter.WithLabelValues("LabelsRegisteringError", mInfo.info.Name, "").Inc()
+			m.errorsCounter.WithLabelValues("LabelsRegisteringError", mInfo.info.Name, "").Inc()
 			continue
 		}
-		e.metricsProcessed.Inc()
+		m.metricsProcessed.Inc()
 	}
 
 	// Process histograms
-	for _, mInfo := range e.histos {
-		labels, value, _ := e.prepareMetric(mci, metricRecord, mInfo.info, mInfo.genericMetric)
+	for _, mInfo := range m.histos {
+		labels, value, _ := m.prepareMetric(mci, metricRecord, mInfo.info, mInfo.genericMetric)
 		if labels == nil {
 			continue
 		}
 		err := mci.ProcessHist(mInfo.genericMetric, labels, value)
 		if err != nil {
 			log.Errorf("labels registering error on %s: %v", mInfo.info.Name, err)
-			e.errorsCounter.WithLabelValues("LabelsRegisteringError", mInfo.info.Name, "").Inc()
+			m.errorsCounter.WithLabelValues("LabelsRegisteringError", mInfo.info.Name, "").Inc()
 			continue
 		}
-		e.metricsProcessed.Inc()
+		m.metricsProcessed.Inc()
 	}
 
 	// Process pre-aggregated histograms
-	for _, mInfo := range e.aggHistos {
-		labels, values := e.prepareAggHisto(mci, metricRecord, mInfo.info, mInfo.genericMetric)
+	for _, mInfo := range m.aggHistos {
+		labels, values := m.prepareAggHisto(mci, metricRecord, mInfo.info, mInfo.genericMetric)
 		if labels == nil {
 			continue
 		}
 		err := mci.ProcessAggHist(mInfo.genericMetric, labels, values)
 		if err != nil {
 			log.Errorf("labels registering error on %s: %v", mInfo.info.Name, err)
-			e.errorsCounter.WithLabelValues("LabelsRegisteringError", mInfo.info.Name, "").Inc()
+			m.errorsCounter.WithLabelValues("LabelsRegisteringError", mInfo.info.Name, "").Inc()
 			continue
 		}
-		e.metricsProcessed.Inc()
+		m.metricsProcessed.Inc()
 	}
 }
 
@@ -183,10 +183,10 @@ func (m *MetricsCommonStruct) prepareMetric(mci MetricsCommonInterface, flow con
 		floatVal = floatVal / info.ValueScale
 	}
 
-	entryLabels, key := extractLabelsAndKey(flow, &info.MetricsItem)
+	entryLabels, key := extractLabelsAndKey(flow, info.MetricsItem)
 	// Update entry for expiry mechanism (the entry itself is its own cleanup function)
 	cacheEntry := mci.GetChacheEntry(entryLabels, mv)
-	_, ok := m.mCache.UpdateCacheEntry(key, cacheEntry)
+	ok := m.mCache.UpdateCacheEntry(key, cacheEntry)
 	if !ok {
 		m.metricsDropped.Inc()
 		return nil, 0, ""
@@ -205,10 +205,10 @@ func (m *MetricsCommonStruct) prepareAggHisto(mci MetricsCommonInterface, flow c
 		return nil, nil
 	}
 
-	entryLabels, key := extractLabelsAndKey(flow, &info.MetricsItem)
+	entryLabels, key := extractLabelsAndKey(flow, info.MetricsItem)
 	// Update entry for expiry mechanism (the entry itself is its own cleanup function)
 	cacheEntry := mci.GetChacheEntry(entryLabels, mc)
-	_, ok = m.mCache.UpdateCacheEntry(key, cacheEntry)
+	ok = m.mCache.UpdateCacheEntry(key, cacheEntry)
 	if !ok {
 		m.metricsDropped.Inc()
 		return nil, nil
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlplogs.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlplogs.go
index d67a073c858e227ebec9bfc034501c5a059c959b..00ee43370b8391eef9b285de2014ca9525a28e0f 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlplogs.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlplogs.go
@@ -42,7 +42,7 @@ func (e *EncodeOtlpLogs) Encode(entry config.GenericMap) {
 	e.LogWrite(entry)
 }
 
-func NewEncodeOtlpLogs(opMetrics *operational.Metrics, params config.StageParam) (encode.Encoder, error) {
+func NewEncodeOtlpLogs(_ *operational.Metrics, params config.StageParam) (encode.Encoder, error) {
 	log.Tracef("entering NewEncodeOtlpLogs \n")
 	cfg := api.EncodeOtlpLogs{}
 	if params.Encode != nil && params.Encode.OtlpLogs != nil {
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlpmetrics.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlpmetrics.go
index c645bd53d645ebbb79f86f578df686841d85c5c0..36b2908bd8ecdcfb861ca9093422429373c6ab8c 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlpmetrics.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlpmetrics.go
@@ -85,7 +85,7 @@ func (e *EncodeOtlpMetrics) ProcessAggHist(m interface{}, labels map[string]stri
 	return nil
 }
 
-func (e *EncodeOtlpMetrics) GetChacheEntry(entryLabels map[string]string, m interface{}) interface{} {
+func (e *EncodeOtlpMetrics) GetChacheEntry(entryLabels map[string]string, _ interface{}) interface{} {
 	return entryLabels
 }
 
@@ -126,21 +126,22 @@ func NewEncodeOtlpMetrics(opMetrics *operational.Metrics, params config.StagePar
 	metricCommon := encode.NewMetricsCommonStruct(opMetrics, 0, params.Name, expiryTime, nil)
 	w.metricCommon = metricCommon
 
-	for _, mCfg := range cfg.Metrics {
+	for i := range cfg.Metrics {
+		mCfg := &cfg.Metrics[i]
 		fullMetricName := cfg.Prefix + mCfg.Name
 		labels := mCfg.Labels
 		log.Debugf("fullMetricName = %v", fullMetricName)
 		log.Debugf("Labels = %v", labels)
 		mInfo := encode.CreateMetricInfo(mCfg)
 		switch mCfg.Type {
-		case api.MetricEncodeOperationName("Counter"):
+		case api.MetricCounter:
 			counter, err := meter.Float64Counter(fullMetricName)
 			if err != nil {
 				log.Errorf("error during counter creation: %v", err)
 				return nil, err
 			}
 			metricCommon.AddCounter(counter, mInfo)
-		case api.MetricEncodeOperationName("Gauge"):
+		case api.MetricGauge:
 			// at implementation time, only asynchronous gauges are supported by otel in golang
 			obs := Float64Gauge{observations: make(map[string]Float64GaugeEntry)}
 			gauge, err := meterFactory.Float64ObservableGauge(
@@ -152,7 +153,7 @@ func NewEncodeOtlpMetrics(opMetrics *operational.Metrics, params config.StagePar
 				return nil, err
 			}
 			metricCommon.AddGauge(gauge, mInfo)
-		case api.MetricEncodeOperationName("Histogram"):
+		case api.MetricHistogram:
 			var histo metric.Float64Histogram
 			if len(mCfg.Buckets) == 0 {
 				histo, err = meter.Float64Histogram(fullMetricName)
@@ -167,7 +168,9 @@ func NewEncodeOtlpMetrics(opMetrics *operational.Metrics, params config.StagePar
 				return nil, err
 			}
 			metricCommon.AddHist(histo, mInfo)
-		case "default":
+		case api.MetricAggHistogram:
+			fallthrough
+		default:
 			log.Errorf("invalid metric type = %v, skipping", mCfg.Type)
 			continue
 		}
@@ -189,7 +192,7 @@ type Float64Gauge struct {
 
 // Callback implements the callback function for the underlying asynchronous gauge
 // it observes the current state of all previous Set() calls.
-func (f *Float64Gauge) Callback(ctx context.Context, o metric.Float64Observer) error {
+func (f *Float64Gauge) Callback(_ context.Context, o metric.Float64Observer) error {
 	for _, fEntry := range f.observations {
 		o.Observe(fEntry.value, metric.WithAttributes(fEntry.attributes...))
 	}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlptrace.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlptrace.go
index e1fe987919f4020b94374139c38475dbb1c2e2cc..4930c4e51b8bac522e99d4abaf4a8c67d87efa25 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlptrace.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlptrace.go
@@ -91,7 +91,7 @@ OUTER:
 	}
 }
 
-func NewEncodeOtlpTraces(opMetrics *operational.Metrics, params config.StageParam) (encode.Encoder, error) {
+func NewEncodeOtlpTraces(_ *operational.Metrics, params config.StageParam) (encode.Encoder, error) {
 	log.Tracef("entering NewEncodeOtlpTraces \n")
 	cfg := api.EncodeOtlpTraces{}
 	if params.Encode != nil && params.Encode.OtlpTraces != nil {
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/opentelemetry.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/opentelemetry.go
index dae256f3ad5004c4bbd1eb355e2ec9732fb6f138..e7372a19a1947d344f7ebf4d391353d4f431836e 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/opentelemetry.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/opentelemetry.go
@@ -309,7 +309,7 @@ func obtainAttributesFromLabels(labels map[string]string) []attribute.KeyValue {
 	return att
 }
 
-func (e *EncodeOtlpMetrics) MetricWrite(entry config.GenericMap) {
+func (e *EncodeOtlpMetrics) MetricWrite(_ config.GenericMap) {
 	// nothing more to do at present
 }
 
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate/aggregate.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate/aggregate.go
index b96b14dff5f5e975059dd1139ac813631b936054..621b4f60130a3336fbad30c0bc3501787626e1e0 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate/aggregate.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate/aggregate.go
@@ -45,7 +45,7 @@ type Labels map[string]string
 type NormalizedValues string
 
 type Aggregate struct {
-	Definition api.AggregateDefinition
+	definition *api.AggregateDefinition
 	cache      *utils.TimedCache
 	mutex      *sync.Mutex
 	expiryTime time.Duration
@@ -65,7 +65,7 @@ func (aggregate *Aggregate) LabelsFromEntry(entry config.GenericMap) (Labels, bo
 	allLabelsFound := true
 	labels := Labels{}
 
-	for _, key := range aggregate.Definition.GroupByKeys {
+	for _, key := range aggregate.definition.GroupByKeys {
 		value, ok := entry[key]
 		if !ok {
 			allLabelsFound = false
@@ -96,14 +96,14 @@ func (labels Labels) getNormalizedValues() NormalizedValues {
 	return NormalizedValues(normalizedAsString)
 }
 
-func (aggregate *Aggregate) FilterEntry(entry config.GenericMap) (error, NormalizedValues, Labels) {
+func (aggregate *Aggregate) filterEntry(entry config.GenericMap) (NormalizedValues, Labels, error) {
 	labels, allLabelsFound := aggregate.LabelsFromEntry(entry)
 	if !allLabelsFound {
-		return fmt.Errorf("missing keys in entry"), "", nil
+		return "", nil, fmt.Errorf("missing keys in entry")
 	}
 
 	normalizedValues := labels.getNormalizedValues()
-	return nil, normalizedValues, labels
+	return normalizedValues, labels, nil
 }
 
 func getInitValue(operation string) float64 {
@@ -130,10 +130,10 @@ func (aggregate *Aggregate) UpdateByEntry(entry config.GenericMap, normalizedVal
 	oldEntry, ok := aggregate.cache.GetCacheEntry(string(normalizedValues))
 	if !ok {
 		groupState = &GroupState{normalizedValues: normalizedValues, labels: labels}
-		initVal := getInitValue(string(aggregate.Definition.OperationType))
+		initVal := getInitValue(string(aggregate.definition.OperationType))
 		groupState.totalValue = initVal
 		groupState.recentOpValue = initVal
-		if aggregate.Definition.OperationType == OperationRawValues {
+		if aggregate.definition.OperationType == OperationRawValues {
 			groupState.recentRawValues = make([]float64, 0)
 		}
 	} else {
@@ -142,8 +142,8 @@ func (aggregate *Aggregate) UpdateByEntry(entry config.GenericMap, normalizedVal
 	aggregate.cache.UpdateCacheEntry(string(normalizedValues), groupState)
 
 	// update value
-	operationKey := aggregate.Definition.OperationKey
-	operation := aggregate.Definition.OperationType
+	operationKey := aggregate.definition.OperationKey
+	operation := aggregate.definition.OperationType
 
 	if operation == OperationCount {
 		groupState.totalValue = float64(groupState.totalCount + 1)
@@ -179,8 +179,8 @@ func (aggregate *Aggregate) UpdateByEntry(entry config.GenericMap, normalizedVal
 	}
 
 	// update count
-	groupState.totalCount += 1
-	groupState.recentCount += 1
+	groupState.totalCount++
+	groupState.recentCount++
 
 	return nil
 }
@@ -188,7 +188,7 @@ func (aggregate *Aggregate) UpdateByEntry(entry config.GenericMap, normalizedVal
 func (aggregate *Aggregate) Evaluate(entries []config.GenericMap) error {
 	for _, entry := range entries {
 		// filter entries matching labels with aggregates
-		err, normalizedValues, labels := aggregate.FilterEntry(entry)
+		normalizedValues, labels, err := aggregate.filterEntry(entry)
 		if err != nil {
 			continue
 		}
@@ -211,37 +211,33 @@ func (aggregate *Aggregate) GetMetrics() []config.GenericMap {
 	var metrics []config.GenericMap
 
 	// iterate over the items in the cache
-	aggregate.cache.Iterate(func(key string, value interface{}) {
+	aggregate.cache.Iterate(func(_ string, value interface{}) {
 		group := value.(*GroupState)
 		newEntry := config.GenericMap{
-			"name":              aggregate.Definition.Name,
-			"operation_type":    aggregate.Definition.OperationType,
-			"operation_key":     aggregate.Definition.OperationKey,
-			"by":                strings.Join(aggregate.Definition.GroupByKeys, ","),
+			"name":              aggregate.definition.Name,
+			"operation_type":    aggregate.definition.OperationType,
+			"operation_key":     aggregate.definition.OperationKey,
+			"by":                strings.Join(aggregate.definition.GroupByKeys, ","),
 			"aggregate":         string(group.normalizedValues),
 			"total_value":       group.totalValue,
 			"total_count":       group.totalCount,
 			"recent_raw_values": group.recentRawValues,
 			"recent_op_value":   group.recentOpValue,
 			"recent_count":      group.recentCount,
-			strings.Join(aggregate.Definition.GroupByKeys, "_"): string(group.normalizedValues),
+			strings.Join(aggregate.definition.GroupByKeys, "_"): string(group.normalizedValues),
 		}
-		// add the items in aggregate.Definition.GroupByKeys individually to the entry
-		for _, key := range aggregate.Definition.GroupByKeys {
+		// add the items in aggregate.definition.GroupByKeys individually to the entry
+		for _, key := range aggregate.definition.GroupByKeys {
 			newEntry[key] = group.labels[key]
 		}
 		metrics = append(metrics, newEntry)
 		// Once reported, we reset the recentXXX fields
-		if aggregate.Definition.OperationType == OperationRawValues {
+		if aggregate.definition.OperationType == OperationRawValues {
 			group.recentRawValues = make([]float64, 0)
 		}
 		group.recentCount = 0
-		group.recentOpValue = getInitValue(string(aggregate.Definition.OperationType))
+		group.recentOpValue = getInitValue(string(aggregate.definition.OperationType))
 	})
 
 	return metrics
 }
-
-func (aggregate Aggregate) Cleanup(entry interface{}) {
-	// nothing special to do in this callback function
-}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate/aggregates.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate/aggregates.go
index 775b90baf66fab47bfcb400cef9309c810bb80b7..de092d5e41149533fb379b1a7011afe83555b34a 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate/aggregates.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate/aggregates.go
@@ -58,13 +58,13 @@ func (aggregates *Aggregates) GetMetrics() []config.GenericMap {
 	return metrics
 }
 
-func (aggregates *Aggregates) AddAggregate(aggregateDefinition api.AggregateDefinition) []Aggregate {
+func (aggregates *Aggregates) addAggregate(aggregateDefinition *api.AggregateDefinition) []Aggregate {
 	expiryTime := aggregateDefinition.ExpiryTime
 	if expiryTime.Duration == 0 {
 		expiryTime.Duration = defaultExpiryTime
 	}
 	aggregate := Aggregate{
-		Definition: aggregateDefinition,
+		definition: aggregateDefinition,
 		cache:      utils.NewTimedCache(0, nil),
 		mutex:      &sync.Mutex{},
 		expiryTime: expiryTime.Duration,
@@ -92,7 +92,7 @@ func (aggregates *Aggregates) cleanupExpiredEntriesLoop() {
 func (aggregates *Aggregates) cleanupExpiredEntries() {
 	for _, aggregate := range aggregates.Aggregates {
 		aggregate.mutex.Lock()
-		aggregate.cache.CleanupExpiredEntries(aggregate.expiryTime, aggregate.Cleanup)
+		aggregate.cache.CleanupExpiredEntries(aggregate.expiryTime, func(_ interface{}) {})
 		aggregate.mutex.Unlock()
 	}
 }
@@ -106,8 +106,8 @@ func NewAggregatesFromConfig(aggConfig *api.Aggregates) (Aggregates, error) {
 		aggregates.defaultExpiryTime = defaultExpiryTime
 	}
 
-	for _, aggregateDefinition := range aggConfig.Rules {
-		aggregates.Aggregates = aggregates.AddAggregate(aggregateDefinition)
+	for i := range aggConfig.Rules {
+		aggregates.Aggregates = aggregates.addAggregate(&aggConfig.Rules[i])
 	}
 
 	aggregates.cleanupExpiredEntriesLoop()
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/aggregator.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/aggregator.go
index 3e84d7a520a84620b75fa9382d374f8c0a75164c..cf5b1de01da3894b8bb02d202c677c8236cdb8fd 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/aggregator.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/aggregator.go
@@ -68,22 +68,22 @@ func newAggregator(of api.OutputField, metrics *metricsType) (aggregator, error)
 	aggBase := aggregateBase{inputField: inputField, outputField: of.Name, splitAB: of.SplitAB, metrics: metrics, reportMissing: of.ReportMissing}
 	var agg aggregator
 	switch of.Operation {
-	case api.ConnTrackOperationName("Sum"):
+	case api.ConnTrackSum:
 		aggBase.initVal = float64(0)
 		agg = &aSum{aggBase}
-	case api.ConnTrackOperationName("Count"):
+	case api.ConnTrackCount:
 		aggBase.initVal = float64(0)
 		agg = &aCount{aggBase}
-	case api.ConnTrackOperationName("Min"):
+	case api.ConnTrackMin:
 		aggBase.initVal = math.MaxFloat64
 		agg = &aMin{aggBase}
-	case api.ConnTrackOperationName("Max"):
+	case api.ConnTrackMax:
 		aggBase.initVal = -math.MaxFloat64
 		agg = &aMax{aggBase}
-	case api.ConnTrackOperationName("First"):
+	case api.ConnTrackFirst:
 		aggBase.initVal = nil
 		agg = &aFirst{aggBase}
-	case api.ConnTrackOperationName("Last"):
+	case api.ConnTrackLast:
 		aggBase.initVal = nil
 		agg = &aLast{aggBase}
 	default:
@@ -100,6 +100,8 @@ func (agg *aggregateBase) getOutputField(d direction) string {
 			outputField += "_AB"
 		case dirBA:
 			outputField += "_BA"
+		case dirNA:
+			fallthrough
 		default:
 			log.Panicf("splitAB aggregator %v cannot determine outputField because direction is missing. Check configuration.", outputField)
 		}
@@ -139,7 +141,7 @@ func (agg *aggregateBase) addField(conn connection) {
 	}
 }
 
-func (agg *aSum) update(conn connection, flowLog config.GenericMap, d direction, isNew bool) {
+func (agg *aSum) update(conn connection, flowLog config.GenericMap, d direction, _ bool) {
 	outputField := agg.getOutputField(d)
 	v, err := agg.getInputFieldValue(flowLog)
 	if err != nil {
@@ -151,14 +153,14 @@ func (agg *aSum) update(conn connection, flowLog config.GenericMap, d direction,
 	})
 }
 
-func (agg *aCount) update(conn connection, flowLog config.GenericMap, d direction, isNew bool) {
+func (agg *aCount) update(conn connection, _ config.GenericMap, d direction, _ bool) {
 	outputField := agg.getOutputField(d)
 	conn.updateAggFnValue(outputField, func(curr float64) float64 {
 		return curr + 1
 	})
 }
 
-func (agg *aMin) update(conn connection, flowLog config.GenericMap, d direction, isNew bool) {
+func (agg *aMin) update(conn connection, flowLog config.GenericMap, d direction, _ bool) {
 	outputField := agg.getOutputField(d)
 	v, err := agg.getInputFieldValue(flowLog)
 	if err != nil {
@@ -171,7 +173,7 @@ func (agg *aMin) update(conn connection, flowLog config.GenericMap, d direction,
 	})
 }
 
-func (agg *aMax) update(conn connection, flowLog config.GenericMap, d direction, isNew bool) {
+func (agg *aMax) update(conn connection, flowLog config.GenericMap, d direction, _ bool) {
 	outputField := agg.getOutputField(d)
 	v, err := agg.getInputFieldValue(flowLog)
 	if err != nil {
@@ -184,13 +186,13 @@ func (agg *aMax) update(conn connection, flowLog config.GenericMap, d direction,
 	})
 }
 
-func (cp *aFirst) update(conn connection, flowLog config.GenericMap, d direction, isNew bool) {
+func (cp *aFirst) update(conn connection, flowLog config.GenericMap, _ direction, isNew bool) {
 	if isNew {
 		conn.updateAggValue(cp.outputField, flowLog[cp.inputField])
 	}
 }
 
-func (cp *aLast) update(conn connection, flowLog config.GenericMap, d direction, isNew bool) {
+func (cp *aLast) update(conn connection, flowLog config.GenericMap, _ direction, _ bool) {
 	if flowLog[cp.inputField] != nil {
 		conn.updateAggValue(cp.outputField, flowLog[cp.inputField])
 	}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/conn.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/conn.go
index d3b9594e6398810ef92e485bf949c7c2073d2fb4..8fae6777e9a4894f32001daae2e0fbce589329b6 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/conn.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/conn.go
@@ -123,6 +123,7 @@ func (c *connType) markReported() bool {
 	return isFirst
 }
 
+//nolint:cyclop
 func (c *connType) isMatchSelector(selector map[string]interface{}) bool {
 	for k, v := range selector {
 		connValueRaw, found := c.keys[k]
@@ -182,7 +183,7 @@ type connBuilder struct {
 	metrics      *metricsType
 }
 
-func NewConnBuilder(metrics *metricsType) *connBuilder {
+func newConnBuilder(metrics *metricsType) *connBuilder {
 	return &connBuilder{
 		conn: &connType{
 			aggFields:  make(map[string]interface{}),
@@ -206,7 +207,7 @@ func (cb *connBuilder) ShouldSwapAB(b bool) *connBuilder {
 	return cb
 }
 
-func (cb *connBuilder) KeysFrom(flowLog config.GenericMap, kd api.KeyDefinition, endpointAFields, endpointBFields []string) *connBuilder {
+func (cb *connBuilder) keysFrom(flowLog config.GenericMap, kd *api.KeyDefinition, endpointAFields, endpointBFields []string) *connBuilder {
 	for _, fg := range kd.FieldGroups {
 		for _, f := range fg.Fields {
 			cb.conn.keys[f] = flowLog[f]
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/conntrack.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/conntrack.go
index 4747f598bd452849be4bd3d04e2ae6a9ccc3a20e..47d67a49388b1249709c6edf8d87ef4a2f0423b9 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/conntrack.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/conntrack.go
@@ -72,7 +72,7 @@ func (ct *conntrackImpl) Extract(flowLogs []config.GenericMap) []config.GenericM
 			ct.metrics.inputRecords.WithLabelValues("discarded").Inc()
 			continue
 		}
-		computedHash, err := ComputeHash(fl, ct.config.KeyDefinition, ct.hashProvider(), ct.metrics)
+		computedHash, err := computeHash(fl, &ct.config.KeyDefinition, ct.hashProvider(), ct.metrics)
 		if err != nil {
 			log.Warningf("skipping flow log %v: %v", fl, err)
 			ct.metrics.inputRecords.WithLabelValues("rejected").Inc()
@@ -89,11 +89,11 @@ func (ct *conntrackImpl) Extract(flowLogs []config.GenericMap) []config.GenericM
 					log.Warningf("too many connections; skipping flow log %v: ", fl)
 					ct.metrics.inputRecords.WithLabelValues("discarded").Inc()
 				} else {
-					builder := NewConnBuilder(ct.metrics)
+					builder := newConnBuilder(ct.metrics)
 					conn = builder.
-						ShouldSwapAB(ct.config.TCPFlags.SwapAB && ct.containsTcpFlag(fl, SYN_ACK_FLAG)).
+						ShouldSwapAB(ct.config.TCPFlags.SwapAB && ct.containsTCPFlag(fl, SYNACKFlag)).
 						Hash(computedHash).
-						KeysFrom(fl, ct.config.KeyDefinition, ct.endpointAFields, ct.endpointBFields).
+						keysFrom(fl, &ct.config.KeyDefinition, ct.endpointAFields, ct.endpointBFields).
 						Aggregators(ct.aggregators).
 						Hash(computedHash).
 						Build()
@@ -104,7 +104,7 @@ func (ct *conntrackImpl) Extract(flowLogs []config.GenericMap) []config.GenericM
 					if ct.shouldOutputNewConnection {
 						record := conn.toGenericMap()
 						addHashField(record, computedHash.hashTotal)
-						addTypeField(record, api.ConnTrackOutputRecordTypeName("NewConnection"))
+						addTypeField(record, api.ConnTrackNewConnection)
 						isFirst := conn.markReported()
 						addIsFirstField(record, isFirst)
 						outputRecords = append(outputRecords, record)
@@ -120,7 +120,7 @@ func (ct *conntrackImpl) Extract(flowLogs []config.GenericMap) []config.GenericM
 		if ct.shouldOutputFlowLogs {
 			record := fl.Copy()
 			addHashField(record, computedHash.hashTotal)
-			addTypeField(record, api.ConnTrackOutputRecordTypeName("FlowLog"))
+			addTypeField(record, api.ConnTrackFlowLog)
 			outputRecords = append(outputRecords, record)
 			ct.metrics.outputRecords.WithLabelValues("flowLog").Inc()
 		}
@@ -149,7 +149,7 @@ func (ct *conntrackImpl) popEndConnections() []config.GenericMap {
 	for _, conn := range connections {
 		record := conn.toGenericMap()
 		addHashField(record, conn.getHash().hashTotal)
-		addTypeField(record, api.ConnTrackOutputRecordTypeName("EndConnection"))
+		addTypeField(record, api.ConnTrackEndConnection)
 		var isFirst bool
 		if ct.shouldOutputEndConnection {
 			isFirst = conn.markReported()
@@ -168,7 +168,7 @@ func (ct *conntrackImpl) prepareHeartbeatRecords() []config.GenericMap {
 	for _, conn := range connections {
 		record := conn.toGenericMap()
 		addHashField(record, conn.getHash().hashTotal)
-		addTypeField(record, api.ConnTrackOutputRecordTypeName("Heartbeat"))
+		addTypeField(record, api.ConnTrackHeartbeat)
 		var isFirst bool
 		if ct.shouldOutputHeartbeats {
 			isFirst = conn.markReported()
@@ -185,7 +185,7 @@ func (ct *conntrackImpl) updateConnection(conn connection, flowLog config.Generi
 		agg.update(conn, flowLog, d, isNew)
 	}
 
-	if ct.config.TCPFlags.DetectEndConnection && ct.containsTcpFlag(flowLog, FIN_FLAG) {
+	if ct.config.TCPFlags.DetectEndConnection && ct.containsTCPFlag(flowLog, FINFlag) {
 		ct.metrics.tcpFlags.WithLabelValues("detectEndConnection").Inc()
 		ct.connStore.setConnectionTerminating(flowLogHash.hashTotal)
 	} else {
@@ -193,7 +193,7 @@ func (ct *conntrackImpl) updateConnection(conn connection, flowLog config.Generi
 	}
 }
 
-func (ct *conntrackImpl) containsTcpFlag(flowLog config.GenericMap, queryFlag uint32) bool {
+func (ct *conntrackImpl) containsTCPFlag(flowLog config.GenericMap, queryFlag uint32) bool {
 	tcpFlagsRaw, ok := flowLog[ct.config.TCPFlags.FieldName]
 	if ok {
 		tcpFlags, err := utils.ConvertToUint32(tcpFlagsRaw)
@@ -247,13 +247,13 @@ func NewConnectionTrack(opMetrics *operational.Metrics, params config.StageParam
 	shouldOutputHeartbeats := false
 	for _, option := range cfg.OutputRecordTypes {
 		switch option {
-		case api.ConnTrackOutputRecordTypeName("FlowLog"):
+		case api.ConnTrackFlowLog:
 			shouldOutputFlowLogs = true
-		case api.ConnTrackOutputRecordTypeName("NewConnection"):
+		case api.ConnTrackNewConnection:
 			shouldOutputNewConnection = true
-		case api.ConnTrackOutputRecordTypeName("EndConnection"):
+		case api.ConnTrackEndConnection:
 			shouldOutputEndConnection = true
-		case api.ConnTrackOutputRecordTypeName("Heartbeat"):
+		case api.ConnTrackHeartbeat:
 			shouldOutputHeartbeats = true
 		default:
 			return nil, fmt.Errorf("unknown OutputRecordTypes: %v", option)
@@ -278,11 +278,11 @@ func NewConnectionTrack(opMetrics *operational.Metrics, params config.StageParam
 	return conntrack, nil
 }
 
-func addHashField(record config.GenericMap, hashId uint64) {
-	record[api.HashIdFieldName] = strconv.FormatUint(hashId, 16)
+func addHashField(record config.GenericMap, hashID uint64) {
+	record[api.HashIDFieldName] = strconv.FormatUint(hashID, 16)
 }
 
-func addTypeField(record config.GenericMap, recordType string) {
+func addTypeField(record config.GenericMap, recordType api.ConnTrackOutputRecordTypeEnum) {
 	record[api.RecordTypeFieldName] = recordType
 }
 
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/hash.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/hash.go
index 374c8bc21f036522899e73e6127f1460e7f7679f..042331b73ab832fa165e942a23cb74c721402cfa 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/hash.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/hash.go
@@ -36,9 +36,9 @@ type totalHashType struct {
 	hashTotal uint64
 }
 
-// ComputeHash computes the hash of a flow log according to keyDefinition.
+// computeHash computes the hash of a flow log according to keyDefinition.
 // Two flow logs will have the same hash if they belong to the same connection.
-func ComputeHash(flowLog config.GenericMap, keyDefinition api.KeyDefinition, hasher hash.Hash64, metrics *metricsType) (totalHashType, error) {
+func computeHash(flowLog config.GenericMap, keyDefinition *api.KeyDefinition, hasher hash.Hash64, metrics *metricsType) (totalHashType, error) {
 	fieldGroup2hash := make(map[string]uint64)
 
 	// Compute the hash of each field group
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/store.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/store.go
index dd7bfe59c517d60bb3007cc07650254243e6bc8b..bc7040d64ec0d079444eb23d9c7269180b532744 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/store.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/store.go
@@ -40,7 +40,7 @@ const (
 // This allows efficient retrieval and removal of connections.
 type connectionStore struct {
 	groups          []*groupType
-	hashId2groupIdx map[uint64]int
+	hashID2groupIdx map[uint64]int
 	metrics         *metricsType
 	now             func() time.Time
 }
@@ -68,23 +68,23 @@ func (cs *connectionStore) getGroupIdx(conn connection) (groupIdx int) {
 	return lastGroupIdx
 }
 
-func (cs *connectionStore) addConnection(hashId uint64, conn connection) {
+func (cs *connectionStore) addConnection(hashID uint64, conn connection) {
 	groupIdx := cs.getGroupIdx(conn)
 	mom := cs.groups[groupIdx].activeMom
 
-	err := mom.AddRecord(utils.Key(hashId), conn)
+	err := mom.AddRecord(utils.Key(hashID), conn)
 	if err != nil {
-		log.Errorf("BUG. connection with hash %x already exists in store. %v", hashId, conn)
+		log.Errorf("BUG. connection with hash %x already exists in store. %v", hashID, conn)
 	}
-	cs.hashId2groupIdx[hashId] = groupIdx
+	cs.hashID2groupIdx[hashID] = groupIdx
 
 	groupLabel := cs.groups[groupIdx].labelValue
 	activeLen := cs.groups[groupIdx].activeMom.Len()
 	cs.metrics.connStoreLength.WithLabelValues(groupLabel, activeLabel).Set(float64(activeLen))
 }
 
-func (cs *connectionStore) getConnection(hashId uint64) (connection, bool, bool) {
-	groupIdx, found := cs.hashId2groupIdx[hashId]
+func (cs *connectionStore) getConnection(hashID uint64) (connection, bool, bool) {
+	groupIdx, found := cs.hashID2groupIdx[hashID]
 	if !found {
 		return nil, false, false
 	}
@@ -92,12 +92,12 @@ func (cs *connectionStore) getConnection(hashId uint64) (connection, bool, bool)
 
 	// get connection from active map
 	isRunning := true
-	record, ok := mom.GetRecord(utils.Key(hashId))
+	record, ok := mom.GetRecord(utils.Key(hashID))
 	if !ok {
 		// fallback on terminating map if not found
 		isRunning = false
 		mom := cs.groups[groupIdx].terminatingMom
-		record, ok = mom.GetRecord(utils.Key(hashId))
+		record, ok = mom.GetRecord(utils.Key(hashID))
 		if !ok {
 			return nil, false, false
 		}
@@ -106,16 +106,16 @@ func (cs *connectionStore) getConnection(hashId uint64) (connection, bool, bool)
 	return conn, true, isRunning
 }
 
-func (cs *connectionStore) setConnectionTerminating(hashId uint64) {
-	conn, ok, active := cs.getConnection(hashId)
+func (cs *connectionStore) setConnectionTerminating(hashID uint64) {
+	conn, ok, active := cs.getConnection(hashID)
 	if !ok {
-		log.Panicf("BUG. connection hash %x doesn't exist", hashId)
+		log.Panicf("BUG. connection hash %x doesn't exist", hashID)
 		return
 	} else if !active {
 		// connection is terminating
 		return
 	}
-	groupIdx := cs.hashId2groupIdx[hashId]
+	groupIdx := cs.hashID2groupIdx[hashID]
 	groupLabel := cs.groups[groupIdx].labelValue
 	activeMom := cs.groups[groupIdx].activeMom
 	terminatingMom := cs.groups[groupIdx].terminatingMom
@@ -123,58 +123,58 @@ func (cs *connectionStore) setConnectionTerminating(hashId uint64) {
 	newExpiryTime := cs.now().Add(timeout)
 	conn.setExpiryTime(newExpiryTime)
 	// Remove connection from active map
-	activeMom.RemoveRecord(utils.Key(hashId))
+	activeMom.RemoveRecord(utils.Key(hashID))
 	activeLen := cs.groups[groupIdx].activeMom.Len()
 	cs.metrics.connStoreLength.WithLabelValues(groupLabel, activeLabel).Set(float64(activeLen))
 	// Add connection to terminating map
-	err := terminatingMom.AddRecord(utils.Key(hashId), conn)
+	err := terminatingMom.AddRecord(utils.Key(hashID), conn)
 	if err != nil {
-		log.Errorf("BUG. connection with hash %x already exists in store. %v", hashId, conn)
+		log.Errorf("BUG. connection with hash %x already exists in store. %v", hashID, conn)
 	}
 	terminatingLen := cs.groups[groupIdx].terminatingMom.Len()
 	cs.metrics.connStoreLength.WithLabelValues(groupLabel, terminatingLabel).Set(float64(terminatingLen))
 }
 
-func (cs *connectionStore) updateConnectionExpiryTime(hashId uint64) {
-	conn, ok, active := cs.getConnection(hashId)
+func (cs *connectionStore) updateConnectionExpiryTime(hashID uint64) {
+	conn, ok, active := cs.getConnection(hashID)
 	if !ok {
-		log.Panicf("BUG. connection hash %x doesn't exist", hashId)
+		log.Panicf("BUG. connection hash %x doesn't exist", hashID)
 		return
 	} else if !active {
 		// connection is terminating. expiry time can't be updated anymore
 		return
 	}
-	groupIdx := cs.hashId2groupIdx[hashId]
+	groupIdx := cs.hashID2groupIdx[hashID]
 	mom := cs.groups[groupIdx].activeMom
 	timeout := cs.groups[groupIdx].scheduling.EndConnectionTimeout.Duration
 	newExpiryTime := cs.now().Add(timeout)
 	conn.setExpiryTime(newExpiryTime)
 	// Move to the back of the list
-	err := mom.MoveToBack(utils.Key(hashId), expiryOrder)
+	err := mom.MoveToBack(utils.Key(hashID), expiryOrder)
 	if err != nil {
-		log.Panicf("BUG. Can't update connection expiry time for hash %x: %v", hashId, err)
+		log.Panicf("BUG. Can't update connection expiry time for hash %x: %v", hashID, err)
 		return
 	}
 }
 
-func (cs *connectionStore) updateNextHeartbeatTime(hashId uint64) {
-	conn, ok, active := cs.getConnection(hashId)
+func (cs *connectionStore) updateNextHeartbeatTime(hashID uint64) {
+	conn, ok, active := cs.getConnection(hashID)
 	if !ok {
-		log.Panicf("BUG. connection hash %x doesn't exist", hashId)
+		log.Panicf("BUG. connection hash %x doesn't exist", hashID)
 		return
 	} else if !active {
 		// connection is terminating. heartbeat are disabled
 		return
 	}
-	groupIdx := cs.hashId2groupIdx[hashId]
+	groupIdx := cs.hashID2groupIdx[hashID]
 	mom := cs.groups[groupIdx].activeMom
 	timeout := cs.groups[groupIdx].scheduling.HeartbeatInterval.Duration
 	newNextHeartbeatTime := cs.now().Add(timeout)
 	conn.setNextHeartbeatTime(newNextHeartbeatTime)
 	// Move to the back of the list
-	err := mom.MoveToBack(utils.Key(hashId), nextHeartbeatTimeOrder)
+	err := mom.MoveToBack(utils.Key(hashID), nextHeartbeatTimeOrder)
 	if err != nil {
-		log.Panicf("BUG. Can't update next heartbeat time for hash %x: %v", hashId, err)
+		log.Panicf("BUG. Can't update next heartbeat time for hash %x: %v", hashID, err)
 		return
 	}
 }
@@ -189,7 +189,7 @@ func (cs *connectionStore) popEndConnectionOfMap(mom *utils.MultiOrderedMap, gro
 			// The connection has expired. We want to pop it.
 			poppedConnections = append(poppedConnections, conn)
 			shouldDelete, shouldStop = true, false
-			delete(cs.hashId2groupIdx, conn.getHash().hashTotal)
+			delete(cs.hashID2groupIdx, conn.getHash().hashTotal)
 		} else {
 			// No more expired connections
 			shouldDelete, shouldStop = false, true
@@ -251,7 +251,7 @@ func (cs *connectionStore) prepareHeartbeats() []connection {
 }
 
 func (cs *connectionStore) len() int {
-	return len(cs.hashId2groupIdx)
+	return len(cs.hashID2groupIdx)
 }
 
 // schedulingGroupToLabelValue returns a string representation of a scheduling group to be used as a Prometheus label
@@ -286,7 +286,7 @@ func newConnectionStore(scheduling []api.ConnTrackSchedulingGroup, metrics *metr
 
 	cs := &connectionStore{
 		groups:          groups,
-		hashId2groupIdx: map[uint64]int{},
+		hashID2groupIdx: map[uint64]int{},
 		metrics:         metrics,
 		now:             nowFunc,
 	}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/tcpflags.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/tcpflags.go
index b6d38cf970fbb6bac2e6639bba3096af75f691af..2e695129bb1b041bcfdaf21191e5ee0aa2043e3f 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/tcpflags.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/tcpflags.go
@@ -19,19 +19,19 @@ package conntrack
 
 // From: https://github.com/netobserv/netobserv-ebpf-agent/blob/c54e7eb9e37e8ef5bb948eff6141cdddf584a6f9/bpf/flows.c#L45-L56
 const (
-	FIN_FLAG = uint32(0x01)
-	SYN_FLAG = uint32(0x02)
-	RST_FLAG = uint32(0x04)
-	PSH_FLAG = uint32(0x08)
-	ACK_FLAG = uint32(0x10)
-	URG_FLAG = uint32(0x20)
-	ECE_FLAG = uint32(0x40)
-	CWR_FLAG = uint32(0x80)
+	FINFlag = uint32(0x01)
+	SYNFlag = uint32(0x02)
+	RSTFlag = uint32(0x04)
+	PSHFlag = uint32(0x08)
+	ACKFlag = uint32(0x10)
+	URGFlag = uint32(0x20)
+	ECEFlag = uint32(0x40)
+	CWRFlag = uint32(0x80)
 	// Custom flags
-	SYN_ACK_FLAG = uint32(0x100)
-	FIN_ACK_FLAG = uint32(0x200)
-	RST_ACK_FLAG = uint32(0x400)
-	// Note: The difference between SYN_FLAG | ACK_FLAG (0x12) and SYN_ACK_FLAG (0x100) is that the former indicates
+	SYNACKFlag = uint32(0x100)
+	FINACKFlag = uint32(0x200)
+	RSTACKFlag = uint32(0x400)
+	// Note: The difference between SYNFlag | ACKFlag (0x12) and SYNACKFlag (0x100) is that the former indicates
 	// that a flowlog contains TCP packets with the SYN flag set and the ACK flag set, but not necessarily in the same packet.
 	// While the latter indicates that a flowlog contains a TCP packet with both flags set.
 )
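
The renamed constants keep the same bitmask values, so call sites still combine and test them with bitwise operators. A minimal sketch of the distinction described in the note above (hypothetical caller, not part of this patch; flags is an assumed variable):

	flags := SYNFlag | ACKFlag        // SYN and ACK each observed, possibly in different packets
	isSynAck := flags&SYNACKFlag != 0 // true only when a single packet carried both flags together
	_ = isSynAck                      // here: false, since SYNACKFlag was never set
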
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract_aggregate.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract_aggregate.go
index ed2ed740bba9bdd03b9f7991d0a6047475997748..cc982ae934e4b4a0c3be442dfaf9dc099ff7516f 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract_aggregate.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract_aggregate.go
@@ -19,16 +19,16 @@ package extract
 
 import (
 	"github.com/netobserv/flowlogs-pipeline/pkg/config"
-	"github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate"
+	agg "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate"
 	log "github.com/sirupsen/logrus"
 )
 
-type ExtractAggregate struct {
-	Aggregates aggregate.Aggregates
+type aggregates struct {
+	agg.Aggregates
 }
 
 // Extract extracts a flow before being stored
-func (ea *ExtractAggregate) Extract(entries []config.GenericMap) []config.GenericMap {
+func (ea *aggregates) Extract(entries []config.GenericMap) []config.GenericMap {
 	err := ea.Aggregates.Evaluate(entries)
 	if err != nil {
 		log.Debugf("Evaluate error %v", err)
@@ -42,13 +42,13 @@ func (ea *ExtractAggregate) Extract(entries []config.GenericMap) []config.Generi
 // NewExtractAggregate creates a new extractor
 func NewExtractAggregate(params config.StageParam) (Extractor, error) {
 	log.Debugf("entering NewExtractAggregate")
-	aggregates, err := aggregate.NewAggregatesFromConfig(params.Extract.Aggregates)
+	cfg, err := agg.NewAggregatesFromConfig(params.Extract.Aggregates)
 	if err != nil {
 		log.Errorf("error in NewAggregatesFromConfig: %v", err)
 		return nil, err
 	}
 
-	return &ExtractAggregate{
-		Aggregates: aggregates,
+	return &aggregates{
+		Aggregates: cfg,
 	}, nil
 }
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract_timebased.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract_timebased.go
index 56bf317099ab3e154737b81f67a5f3f3f28d6a5e..4d93b048529746f3eb6416f8be0e40553dd2bcb9 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract_timebased.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract_timebased.go
@@ -22,28 +22,29 @@ import (
 
 	"github.com/netobserv/flowlogs-pipeline/pkg/api"
 	"github.com/netobserv/flowlogs-pipeline/pkg/config"
-	"github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased"
+	tb "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased"
 	log "github.com/sirupsen/logrus"
 )
 
-type ExtractTimebased struct {
-	Filters         []timebased.FilterStruct
-	IndexKeyStructs map[string]*timebased.IndexKeyTable
+type timebased struct {
+	Filters         []tb.FilterStruct
+	IndexKeyStructs map[string]*tb.IndexKeyTable
 }
 
 // Extract extracts a flow before being stored
-func (et *ExtractTimebased) Extract(entries []config.GenericMap) []config.GenericMap {
-	log.Debugf("entering ExtractTimebased Extract")
+func (et *timebased) Extract(entries []config.GenericMap) []config.GenericMap {
+	log.Debugf("entering timebased Extract")
 	nowInSecs := time.Now()
 	// Populate the Table with the current entries
 	for _, entry := range entries {
-		log.Debugf("ExtractTimebased Extract, entry = %v", entry)
-		timebased.AddEntryToTables(et.IndexKeyStructs, entry, nowInSecs)
+		log.Debugf("timebased Extract, entry = %v", entry)
+		tb.AddEntryToTables(et.IndexKeyStructs, entry, nowInSecs)
 	}
 
 	output := make([]config.GenericMap, 0)
 	// Calculate Filters based on time windows
-	for _, filter := range et.Filters {
+	for i := range et.Filters {
+		filter := &et.Filters[i]
 		filter.CalculateResults(nowInSecs)
 		filter.ComputeTopkBotk()
 		genMap := filter.CreateGenericMap()
@@ -52,7 +53,7 @@ func (et *ExtractTimebased) Extract(entries []config.GenericMap) []config.Generi
 	log.Debugf("output of extract timebased: %v", output)
 
 	// delete entries from tables that are outside time windows
-	timebased.DeleteOldEntriesFromTables(et.IndexKeyStructs, nowInSecs)
+	tb.DeleteOldEntriesFromTables(et.IndexKeyStructs, nowInSecs)
 
 	return output
 }
@@ -65,9 +66,9 @@ func NewExtractTimebased(params config.StageParam) (Extractor, error) {
 	}
 	log.Debugf("NewExtractTimebased; rules = %v", rules)
 
-	tmpIndexKeyStructs, tmpFilters := timebased.CreateIndexKeysAndFilters(rules)
+	tmpIndexKeyStructs, tmpFilters := tb.CreateIndexKeysAndFilters(rules)
 
-	return &ExtractTimebased{
+	return &timebased{
 		Filters:         tmpFilters,
 		IndexKeyStructs: tmpIndexKeyStructs,
 	}, nil
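
Extract now iterates the Filters slice by index and takes a pointer, so CalculateResults and ComputeTopkBotk mutate the FilterStruct stored in the slice rather than a per-iteration copy. A minimal sketch of the difference (illustrative only; filters and now are assumed local variables):

	// ranging by value: f is a copy, any state written by CalculateResults is discarded
	for _, f := range filters {
		f.CalculateResults(now)
	}
	// indexing and taking a pointer: the state persists in the slice element
	for i := range filters {
		f := &filters[i]
		f.CalculateResults(now)
	}
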
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/filters.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/filters.go
index c556ec975fd2e772576504771d3919f6f1428c89..cd47c2188d75a90ac95bcd79da017847d56003d0 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/filters.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/filters.go
@@ -35,8 +35,9 @@ func (fs *FilterStruct) CalculateResults(nowInSecs time.Time) {
 	for tableKey, l := range fs.IndexKeyDataTable.dataTableMap {
 		var valueFloat64 = float64(0)
 		var err error
+		//nolint:exhaustive
 		switch fs.Rule.OperationType {
-		case api.FilterOperationName("FilterOperationLast"):
+		case api.FilterOperationLast:
 			// handle empty list
 			if l.Len() == 0 {
 				continue
@@ -45,7 +46,7 @@ func (fs *FilterStruct) CalculateResults(nowInSecs time.Time) {
 			if err != nil {
 				continue
 			}
-		case api.FilterOperationName("FilterOperationDiff"):
+		case api.FilterOperationDiff:
 			for e := l.Front(); e != nil; e = e.Next() {
 				cEntry := e.Value.(*TableEntry)
 				if cEntry.timeStamp.Before(oldestValidTime) {
@@ -90,35 +91,36 @@ func (fs *FilterStruct) CalculateValue(l *list.List, oldestValidTime time.Time)
 		} else {
 			nItems++
 			switch fs.Rule.OperationType {
-			case api.FilterOperationName("FilterOperationSum"), api.FilterOperationName("FilterOperationAvg"):
+			case api.FilterOperationSum, api.FilterOperationAvg:
 				currentValue += valueFloat64
-			case api.FilterOperationName("FilterOperationMax"):
+			case api.FilterOperationMax:
 				currentValue = math.Max(currentValue, valueFloat64)
-			case api.FilterOperationName("FilterOperationMin"):
+			case api.FilterOperationMin:
 				currentValue = math.Min(currentValue, valueFloat64)
+			case api.FilterOperationCnt, api.FilterOperationLast, api.FilterOperationDiff:
 			}
 		}
 	}
-	if fs.Rule.OperationType == api.FilterOperationName("FilterOperationAvg") && nItems > 0 {
+	if fs.Rule.OperationType == api.FilterOperationAvg && nItems > 0 {
 		currentValue = currentValue / float64(nItems)
 	}
-	if fs.Rule.OperationType == api.FilterOperationName("FilterOperationCnt") {
+	if fs.Rule.OperationType == api.FilterOperationCnt {
 		currentValue = float64(nItems)
 	}
 	return currentValue
 }
 
-func getInitValue(operation string) float64 {
+func getInitValue(operation api.FilterOperationEnum) float64 {
 	switch operation {
-	case api.FilterOperationName("FilterOperationSum"),
-		api.FilterOperationName("FilterOperationAvg"),
-		api.FilterOperationName("FilterOperationCnt"),
-		api.FilterOperationName("FilterOperationLast"),
-		api.FilterOperationName("FilterOperationDiff"):
+	case api.FilterOperationSum,
+		api.FilterOperationAvg,
+		api.FilterOperationCnt,
+		api.FilterOperationLast,
+		api.FilterOperationDiff:
 		return 0
-	case api.FilterOperationName("FilterOperationMax"):
+	case api.FilterOperationMax:
 		return (-math.MaxFloat64)
-	case api.FilterOperationName("FilterOperationMin"):
+	case api.FilterOperationMin:
 		return math.MaxFloat64
 	default:
 		log.Panicf("unknown operation %v", operation)
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/heap.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/heap.go
index 3167b636d57e0fdda1f135c040e3610e271e99c6..5c52dc65618abecf46f151ec0602696067dac2e7 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/heap.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/heap.go
@@ -59,12 +59,12 @@ func (h *topkHeap) Pop() interface{} {
 	return x
 }
 
-func (filter *FilterStruct) computeTopK(inputs filterOperationResults) []filterOperationResult {
+func (fs *FilterStruct) computeTopK(inputs filterOperationResults) []filterOperationResult {
 	// maintain a heap with k items, always dropping the lowest
 	// we will be left with the TopK items
 	var prevMin float64
 	prevMin = -math.MaxFloat64
-	topk := filter.Rule.TopK
+	topk := fs.Rule.TopK
 	h := &topkHeap{}
 	for key, metricMap := range inputs {
 		val := metricMap.operationResult
@@ -120,12 +120,12 @@ func (h *botkHeap) Pop() interface{} {
 	return x
 }
 
-func (filter *FilterStruct) computeBotK(inputs filterOperationResults) []filterOperationResult {
+func (fs *FilterStruct) computeBotK(inputs filterOperationResults) []filterOperationResult {
 	// maintain a heap with k items, always dropping the highest
 	// we will be left with the BotK items
 	var prevMax float64
 	prevMax = math.MaxFloat64
-	botk := filter.Rule.TopK
+	botk := fs.Rule.TopK
 	h := &botkHeap{}
 	for key, metricMap := range inputs {
 		val := metricMap.operationResult
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/timebased.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/timebased.go
index c58351b6ebf8a8622b0739bf3b44d1371a94e9f9..0a003f57e13945cb57e211b4e9a65d01659fa862 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/timebased.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/timebased.go
@@ -87,13 +87,13 @@ func CreateIndexKeysAndFilters(rules []api.TimebasedFilterRule) (map[string]*Ind
 		}
 		// verify the validity of the OperationType field in the filterRule
 		switch filterRule.OperationType {
-		case api.FilterOperationName("FilterOperationLast"),
-			api.FilterOperationName("FilterOperationDiff"),
-			api.FilterOperationName("FilterOperationCnt"),
-			api.FilterOperationName("FilterOperationAvg"),
-			api.FilterOperationName("FilterOperationMax"),
-			api.FilterOperationName("FilterOperationMin"),
-			api.FilterOperationName("FilterOperationSum"):
+		case api.FilterOperationLast,
+			api.FilterOperationDiff,
+			api.FilterOperationCnt,
+			api.FilterOperationAvg,
+			api.FilterOperationMax,
+			api.FilterOperationMin,
+			api.FilterOperationSum:
 			// OK; nothing to do
 		default:
 			log.Errorf("illegal operation type %s", filterRule.OperationType)
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_collector.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_collector.go
index 4a8ecb719e7a72f3fa131c029dd497067a300bb4..bbe1a356dad9ebc391a053952138b2e3dfa9c9e0 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_collector.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_collector.go
@@ -29,10 +29,10 @@ import (
 	"github.com/netobserv/flowlogs-pipeline/pkg/operational"
 	pUtils "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils"
 	"github.com/netsampler/goflow2/decoders/netflow/templates"
-	_ "github.com/netsampler/goflow2/decoders/netflow/templates/memory"
+	_ "github.com/netsampler/goflow2/decoders/netflow/templates/memory" // required for goflow in-memory templates
 	goflowFormat "github.com/netsampler/goflow2/format"
 	goflowCommonFormat "github.com/netsampler/goflow2/format/common"
-	_ "github.com/netsampler/goflow2/format/protobuf"
+	_ "github.com/netsampler/goflow2/format/protobuf" // required for goflow protobuf
 	goflowpb "github.com/netsampler/goflow2/pb"
 	"github.com/netsampler/goflow2/utils"
 	log "github.com/sirupsen/logrus"
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_fake.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_fake.go
index a9472d592015360210336f80f2555c00f3fcbc98..f4d7ef3b56932bc5ae246fb11e4d789b956f49c0 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_fake.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_fake.go
@@ -26,7 +26,7 @@ import (
 	log "github.com/sirupsen/logrus"
 )
 
-type IngestFake struct {
+type Fake struct {
 	Count    int64
 	params   config.Ingest
 	In       chan config.GenericMap
@@ -34,7 +34,7 @@ type IngestFake struct {
 }
 
 // Ingest reads records from an input channel and writes them as-is to the output channel
-func (inf *IngestFake) Ingest(out chan<- config.GenericMap) {
+func (inf *Fake) Ingest(out chan<- config.GenericMap) {
 	for {
 		select {
 		case <-inf.exitChan:
@@ -54,7 +54,7 @@ func NewIngestFake(params config.StageParam) (Ingester, error) {
 		return nil, fmt.Errorf("ingest not specified")
 	}
 
-	return &IngestFake{
+	return &Fake{
 		params:   *params.Ingest,
 		In:       make(chan config.GenericMap),
 		exitChan: utils.ExitChannel(),
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_file.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_file.go
index 2c42da8ba16c12ca3a788f143cb1b3e5c4dd0686..4d1127b6cdefe964508db1b8a6893a60e20c11ce 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_file.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_file.go
@@ -29,7 +29,7 @@ import (
 	log "github.com/sirupsen/logrus"
 )
 
-type IngestFile struct {
+type ingestFile struct {
 	params       config.Ingest
 	decoder      decode.Decoder
 	exitChan     <-chan struct{}
@@ -43,7 +43,7 @@ const (
 )
 
 // Ingest ingests entries from a file and resends the same data every delaySeconds seconds
-func (ingestF *IngestFile) Ingest(out chan<- config.GenericMap) {
+func (ingestF *ingestFile) Ingest(out chan<- config.GenericMap) {
 	var filename string
 	if ingestF.params.File != nil {
 		filename = ingestF.params.File.Filename
@@ -95,7 +95,7 @@ func (ingestF *IngestFile) Ingest(out chan<- config.GenericMap) {
 	}
 }
 
-func (ingestF *IngestFile) sendAllLines(lines [][]byte, out chan<- config.GenericMap) {
+func (ingestF *ingestFile) sendAllLines(lines [][]byte, out chan<- config.GenericMap) {
 	log.Debugf("ingestFile sending %d lines", len(lines))
 	ingestF.TotalRecords = len(lines)
 	for _, line := range lines {
@@ -121,7 +121,7 @@ func NewIngestFile(params config.StageParam) (Ingester, error) {
 		return nil, err
 	}
 
-	return &IngestFile{
+	return &ingestFile{
 		params:   *params.Ingest,
 		exitChan: utils.ExitChannel(),
 		decoder:  decoder,
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_kafka.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_kafka.go
index c5e41441d789a8f9c86f550d17fc2635c3837016..c5dac331adc19a97fd4f98a52c978c4668f6daf5 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_kafka.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_kafka.go
@@ -173,6 +173,7 @@ func (k *ingestKafka) reportStats() {
 }
 
 // NewIngestKafka create a new ingester
+//nolint:cyclop
 func NewIngestKafka(opMetrics *operational.Metrics, params config.StageParam) (Ingester, error) {
 	klog.Debugf("entering NewIngestKafka")
 	jsonIngestKafka := api.IngestKafka{}
@@ -248,7 +249,7 @@ func NewIngestKafka(opMetrics *operational.Metrics, params config.StageParam) (I
 	readerConfig := kafkago.ReaderConfig{
 		Brokers:        jsonIngestKafka.Brokers,
 		Topic:          jsonIngestKafka.Topic,
-		GroupID:        jsonIngestKafka.GroupId,
+		GroupID:        jsonIngestKafka.GroupID,
 		GroupBalancers: groupBalancers,
 		StartOffset:    startOffset,
 		CommitInterval: time.Duration(commitInterval) * time.Millisecond,
@@ -293,6 +294,6 @@ func NewIngestKafka(opMetrics *operational.Metrics, params config.StageParam) (I
 		batchMaxLength:   bml,
 		batchReadTimeout: batchReadTimeout,
 		metrics:          metrics,
-		canLogMessages:   jsonIngestKafka.Decoder.Type == api.DecoderName("JSON"),
+		canLogMessages:   jsonIngestKafka.Decoder.Type == api.DecoderJSON,
 	}, nil
 }
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_stdin.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_stdin.go
index 1aa24805680224fdc7da8c2051af87b70a294cdc..1b425cf4d3fa976334d0aa4123c755e3acafcf2b 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_stdin.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_stdin.go
@@ -102,7 +102,7 @@ func NewIngestStdin(opMetrics *operational.Metrics, params config.StageParam) (I
 	in := make(chan string, stdinChannelSize)
 	eof := make(chan struct{})
 	metrics := newMetrics(opMetrics, params.Name, params.Ingest.Type, func() int { return len(in) })
-	decoderParams := api.Decoder{Type: api.DecoderName("JSON")}
+	decoderParams := api.Decoder{Type: api.DecoderJSON}
 	decoder, err := decode.GetDecoder(decoderParams)
 	if err != nil {
 		return nil, err
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_synthetic.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_synthetic.go
index c8da4e1e1fa0bb7c96ba090fb7cd9e2f541eff2d..66ee857240d832bde27b88103cfb565a1525e937 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_synthetic.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_synthetic.go
@@ -28,7 +28,7 @@ import (
 	log "github.com/sirupsen/logrus"
 )
 
-type IngestSynthetic struct {
+type ingestSynthetic struct {
 	params            api.IngestSynthetic
 	exitChan          <-chan struct{}
 	flowLogsProcessed prometheus.Counter
@@ -50,7 +50,7 @@ var (
 )
 
 // Ingest generates flow logs according to provided parameters
-func (ingestS *IngestSynthetic) Ingest(out chan<- config.GenericMap) {
+func (ingestS *ingestSynthetic) Ingest(out chan<- config.GenericMap) {
 	log.Debugf("entering IngestSynthetic Ingest, params = %v", ingestS.params)
 	// get a list of flow log entries, one per desired connection
 	// these flow logs will be sent again and again to simulate ongoing traffic on those connections
@@ -99,7 +99,7 @@ func NewIngestSynthetic(opMetrics *operational.Metrics, params config.StageParam
 	}
 	log.Debugf("params = %v", confIngestSynthetic)
 
-	return &IngestSynthetic{
+	return &ingestSynthetic{
 		params:            confIngestSynthetic,
 		exitChan:          utils.ExitChannel(),
 		flowLogsProcessed: opMetrics.NewCounter(&flowLogsProcessed, params.Name),
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline_builder.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline_builder.go
index 6577b90d8ee342fb6a8082489ce67475a914cfce..5eddfce41333d5e86d395f6785ab9b8d392d1044 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline_builder.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline_builder.go
@@ -103,7 +103,7 @@ func newBuilder(cfg *config.ConfigFileStruct) *builder {
 func (b *builder) presetIngester(ing ingest.Ingester) {
 	name := config.PresetIngesterStage
 	log.Debugf("stage = %v", name)
-	b.appendEntry(pipelineEntry{
+	b.appendEntry(&pipelineEntry{
 		stageName: name,
 		stageType: StageIngest,
 		Ingester:  ing,
@@ -136,15 +136,15 @@ func (b *builder) readStages() error {
 		if err != nil {
 			return err
 		}
-		b.appendEntry(pEntry)
+		b.appendEntry(&pEntry)
 	}
 	log.Debugf("pipeline = %v", b.pipelineStages)
 	return nil
 }
 
-func (b *builder) appendEntry(pEntry pipelineEntry) {
-	b.pipelineEntryMap[pEntry.stageName] = &pEntry
-	b.pipelineStages = append(b.pipelineStages, &pEntry)
+func (b *builder) appendEntry(pEntry *pipelineEntry) {
+	b.pipelineEntryMap[pEntry.stageName] = pEntry
+	b.pipelineStages = append(b.pipelineStages, pEntry)
 	log.Debugf("pipeline = %v", b.pipelineStages)
 }
 
@@ -386,7 +386,7 @@ func getWriter(opMetrics *operational.Metrics, params config.StageParam) (write.
 	return writer, err
 }
 
-func getTransformer(opMetrics *operational.Metrics, params config.StageParam) (transform.Transformer, error) {
+func getTransformer(_ *operational.Metrics, params config.StageParam) (transform.Transformer, error) {
 	var transformer transform.Transformer
 	var err error
 	switch params.Transform.Type {
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/enrich.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/enrich.go
index e6ed55bc62f7988b4ec64274acc3b493d8fef813..30bc93e355913edeb3321385bea2789df003f584 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/enrich.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/enrich.go
@@ -48,7 +48,7 @@ func Enrich(outputEntry config.GenericMap, rule api.K8sRule) {
 				outputEntry[rule.Output+"_HostName"] = kubeInfo.HostName
 			}
 		}
-		fillInK8sZone(outputEntry, rule, *kubeInfo, "_Zone")
+		fillInK8sZone(outputEntry, rule, kubeInfo, "_Zone")
 	} else {
 		// NOTE: Some of these fields are taken from opentelemetry specs.
 		// See https://opentelemetry.io/docs/specs/semconv/resource/k8s/
@@ -82,13 +82,13 @@ func Enrich(outputEntry config.GenericMap, rule api.K8sRule) {
 				outputEntry[rule.Output+"k8s.host.name"] = kubeInfo.HostName
 			}
 		}
-		fillInK8sZone(outputEntry, rule, *kubeInfo, "k8s.zone")
+		fillInK8sZone(outputEntry, rule, kubeInfo, "k8s.zone")
 	}
 }
 
 const nodeZoneLabelName = "topology.kubernetes.io/zone"
 
-func fillInK8sZone(outputEntry config.GenericMap, rule api.K8sRule, kubeInfo inf.Info, zonePrefix string) {
+func fillInK8sZone(outputEntry config.GenericMap, rule api.K8sRule, kubeInfo *inf.Info, zonePrefix string) {
 	if !rule.AddZone {
 		//Nothing to do
 		return
@@ -120,7 +120,7 @@ func fillInK8sZone(outputEntry config.GenericMap, rule api.K8sRule, kubeInfo inf
 	}
 }
 
-func EnrichLayer(outputEntry config.GenericMap, rule api.K8sInfraRule) {
+func EnrichLayer(outputEntry config.GenericMap, rule *api.K8sInfraRule) {
 	outputEntry[rule.Output] = "infra"
 	for _, input := range rule.Inputs {
 		if objectIsApp(fmt.Sprintf("%s", outputEntry[input]), rule) {
@@ -130,7 +130,7 @@ func EnrichLayer(outputEntry config.GenericMap, rule api.K8sInfraRule) {
 	}
 }
 
-func objectIsApp(addr string, rule api.K8sInfraRule) bool {
+func objectIsApp(addr string, rule *api.K8sInfraRule) bool {
 	obj, err := informers.GetInfo(addr)
 	if err != nil {
 		logrus.WithError(err).Tracef("can't find kubernetes info for IP %s", addr)
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers-mock.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers-mock.go
index 9d6307308041695c5d87a5c39922524b792b1b72..1d41ad03d62edbfba4d9fee57cbea4301643e023 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers-mock.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers-mock.go
@@ -8,18 +8,18 @@ import (
 	"k8s.io/client-go/tools/cache"
 )
 
-type InformersMock struct {
+type Mock struct {
 	mock.Mock
 	InformersInterface
 }
 
-func NewInformersMock() *InformersMock {
-	inf := new(InformersMock)
+func NewInformersMock() *Mock {
+	inf := new(Mock)
 	inf.On("InitFromConfig", mock.Anything).Return(nil)
 	return inf
 }
 
-func (o *InformersMock) InitFromConfig(kubeConfigPath string) error {
+func (o *Mock) InitFromConfig(kubeConfigPath string) error {
 	args := o.Called(kubeConfigPath)
 	return args.Error(0)
 }
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers.go
index 344db71d1772dfa4951d6553fa1903091e3f184b..66048a454b086c2b67b17e0a6b2b3e6d54dcfcc7 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers.go
@@ -50,6 +50,7 @@ const (
 
 var log = logrus.WithField("component", "transform.Network.Kubernetes")
 
+//nolint:revive
 type InformersInterface interface {
 	GetInfo(string) (*Info, error)
 	GetNodeInfo(string) (*Info, error)
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/location/location.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/location/location.go
index c0bfdc5a633f4be567cb074078b0aebc1b58e50d..42f9ab4a7e0fb3ce0fd45c46075ce9e7f61e965d 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/location/location.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/location/location.go
@@ -47,7 +47,7 @@ const (
 	dbFileLocation    = "/tmp/location_db.bin"
 	dbZIPFileLocation = "/tmp/location_db.bin" + ".zip"
 	// REF: Original location from ip2location DB is: "https://www.ip2location.com/download/?token=OpOljbgT6K2WJnFrFBBmBzRVNpHlcYqNN4CMeGavvh0pPOpyu16gKQyqvDMxTDF4&file=DB9LITEBIN"
-	dbUrl = "https://raw.githubusercontent.com/netobserv/flowlogs-pipeline/main/contrib/location/location.db"
+	dbURL = "https://raw.githubusercontent.com/netobserv/flowlogs-pipeline/main/contrib/location/location.db"
 )
 
 var locationDB *ip2location.DB
@@ -70,7 +70,7 @@ func init() {
 	_osio.MkdirAll = os.MkdirAll
 	_osio.OpenFile = os.OpenFile
 	_osio.Copy = io.Copy
-	_dbURL = dbUrl
+	_dbURL = dbURL
 	locationDBMutex = &sync.Mutex{}
 }
 
@@ -82,7 +82,7 @@ func InitLocationDB() error {
 		log.Infof("Downloading location DB into local file %s ", dbFileLocation)
 		out, createErr := _osio.Create(dbZIPFileLocation)
 		if createErr != nil {
-			return fmt.Errorf("failed os.Create %v ", createErr)
+			return fmt.Errorf("failed os.Create %w ", createErr)
 		}
 
 		timeout := time.Minute
@@ -90,26 +90,26 @@ func InitLocationDB() error {
 		client := &http.Client{Transport: tr, Timeout: timeout}
 		resp, getErr := client.Get(_dbURL)
 		if getErr != nil {
-			return fmt.Errorf("failed http.Get %v ", getErr)
+			return fmt.Errorf("failed http.Get %w ", getErr)
 		}
 
 		log.Infof("Got response %s", resp.Status)
 
 		written, copyErr := io.Copy(out, resp.Body)
 		if copyErr != nil {
-			return fmt.Errorf("failed io.Copy %v ", copyErr)
+			return fmt.Errorf("failed io.Copy %w ", copyErr)
 		}
 
 		log.Infof("Wrote %d bytes to %s", written, dbZIPFileLocation)
 
 		bodyCloseErr := resp.Body.Close()
 		if bodyCloseErr != nil {
-			return fmt.Errorf("failed resp.Body.Close %v ", bodyCloseErr)
+			return fmt.Errorf("failed resp.Body.Close %w ", bodyCloseErr)
 		}
 
 		outCloseErr := out.Close()
 		if outCloseErr != nil {
-			return fmt.Errorf("failed out.Close %v ", outCloseErr)
+			return fmt.Errorf("failed out.Close %w ", outCloseErr)
 		}
 
 		unzipErr := unzip(dbZIPFileLocation, dbFileLocation)
@@ -134,7 +134,7 @@ func InitLocationDB() error {
 				log.Infof("os.ReadFile err %v", readFileErr)
 			}
 
-			return fmt.Errorf("failed unzip %v ", unzipErr)
+			return fmt.Errorf("failed unzip %w ", unzipErr)
 		}
 
 		log.Infof("Download completed successfully")
@@ -143,32 +143,32 @@ func InitLocationDB() error {
 	log.Debugf("Loading location DB")
 	db, openDBErr := ip2location.OpenDB(dbFileLocation + "/" + dbFilename)
 	if openDBErr != nil {
-		return fmt.Errorf("OpenDB err - %v ", openDBErr)
+		return fmt.Errorf("OpenDB err - %w ", openDBErr)
 	}
 
 	locationDB = db
 	return nil
 }
 
-func GetLocation(ip string) (error, *Info) {
+func GetLocation(ip string) (*Info, error) {
 
 	if locationDB == nil {
-		return fmt.Errorf("no location DB available"), nil
+		return nil, fmt.Errorf("no location DB available")
 	}
 
 	res, err := locationDB.Get_all(ip)
 	if err != nil {
-		return err, nil
+		return nil, err
 	}
 
-	return nil, &Info{
+	return &Info{
 		CountryName:     res.Country_short,
 		CountryLongName: res.Country_long,
 		RegionName:      res.Region,
 		CityName:        res.City,
 		Latitude:        fmt.Sprintf("%f", res.Latitude),
 		Longitude:       fmt.Sprintf("%f", res.Longitude),
-	}
+	}, nil
 }
 
 //goland:noinspection ALL
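
Switching these error verbs from %v to %w wraps the underlying error instead of flattening it into a string, so callers can still inspect the original failure. A minimal sketch (hypothetical caller, not part of this patch; assumes the standard errors and net packages are imported):

	if err := InitLocationDB(); err != nil {
		var netErr net.Error
		if errors.As(err, &netErr) && netErr.Timeout() {
			// the HTTP client timeout is still reachable through the wrap chain
		}
	}
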
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_filter.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_filter.go
index 0501c36cd1f73b6d34fa6dba1f999b248dbd661f..db233da4d335635503db18e33109150c6268c40a 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_filter.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_filter.go
@@ -36,6 +36,8 @@ type Filter struct {
 }
 
 // Transform transforms a flow
+//
+//nolint:cyclop
 func (f *Filter) Transform(entry config.GenericMap) (config.GenericMap, bool) {
 	tlog.Tracef("f = %v", f)
 	outputEntry := entry.Copy()
@@ -43,72 +45,72 @@ func (f *Filter) Transform(entry config.GenericMap) (config.GenericMap, bool) {
 	for _, rule := range f.Rules {
 		tlog.Tracef("rule = %v", rule)
 		switch rule.Type {
-		case api.TransformFilterOperationName("RemoveField"):
-			delete(outputEntry, rule.Input)
-		case api.TransformFilterOperationName("RemoveEntryIfExists"):
-			if _, ok := entry[rule.Input]; ok {
+		case api.RemoveField:
+			delete(outputEntry, rule.RemoveField.Input)
+		case api.RemoveEntryIfExists:
+			if _, ok := entry[rule.RemoveEntryIfExists.Input]; ok {
 				return nil, false
 			}
-		case api.TransformFilterOperationName("RemoveEntryIfDoesntExist"):
-			if _, ok := entry[rule.Input]; !ok {
+		case api.RemoveEntryIfDoesntExist:
+			if _, ok := entry[rule.RemoveEntryIfDoesntExist.Input]; !ok {
 				return nil, false
 			}
-		case api.TransformFilterOperationName("RemoveEntryIfEqual"):
-			if val, ok := entry[rule.Input]; ok {
-				if val == rule.Value {
+		case api.RemoveEntryIfEqual:
+			if val, ok := entry[rule.RemoveEntryIfEqual.Input]; ok {
+				if val == rule.RemoveEntryIfEqual.Value {
 					return nil, false
 				}
 			}
-		case api.TransformFilterOperationName("RemoveEntryIfNotEqual"):
-			if val, ok := entry[rule.Input]; ok {
-				if val != rule.Value {
+		case api.RemoveEntryIfNotEqual:
+			if val, ok := entry[rule.RemoveEntryIfNotEqual.Input]; ok {
+				if val != rule.RemoveEntryIfNotEqual.Value {
 					return nil, false
 				}
 			}
-		case api.TransformFilterOperationName("AddField"):
-			outputEntry[rule.Input] = rule.Value
-		case api.TransformFilterOperationName("AddFieldIfDoesntExist"):
-			if _, ok := entry[rule.Input]; !ok {
-				outputEntry[rule.Input] = rule.Value
+		case api.AddField:
+			outputEntry[rule.AddField.Input] = rule.AddField.Value
+		case api.AddFieldIfDoesntExist:
+			if _, ok := entry[rule.AddFieldIfDoesntExist.Input]; !ok {
+				outputEntry[rule.AddFieldIfDoesntExist.Input] = rule.AddFieldIfDoesntExist.Value
 			}
-		case api.TransformFilterOperationName("AddRegExIf"):
-			matched, err := regexp.MatchString(rule.Parameters, fmt.Sprintf("%s", outputEntry[rule.Input]))
+		case api.AddRegExIf:
+			matched, err := regexp.MatchString(rule.AddRegExIf.Parameters, fmt.Sprintf("%s", outputEntry[rule.AddRegExIf.Input]))
 			if err != nil {
 				continue
 			}
 			if matched {
-				outputEntry[rule.Output] = outputEntry[rule.Input]
-				outputEntry[rule.Output+"_Matched"] = true
+				outputEntry[rule.AddRegExIf.Output] = outputEntry[rule.AddRegExIf.Input]
+				outputEntry[rule.AddRegExIf.Output+"_Matched"] = true
 			}
-		case api.TransformFilterOperationName("AddFieldIf"):
-			expressionString := fmt.Sprintf("val %s", rule.Parameters)
+		case api.AddFieldIf:
+			expressionString := fmt.Sprintf("val %s", rule.AddFieldIf.Parameters)
 			expression, err := govaluate.NewEvaluableExpression(expressionString)
 			if err != nil {
 				log.Warningf("Can't evaluate AddIf rule: %+v expression: %v. err %v", rule, expressionString, err)
 				continue
 			}
-			result, evaluateErr := expression.Evaluate(map[string]interface{}{"val": outputEntry[rule.Input]})
+			result, evaluateErr := expression.Evaluate(map[string]interface{}{"val": outputEntry[rule.AddFieldIf.Input]})
 			if evaluateErr == nil && result.(bool) {
-				if rule.Assignee != "" {
-					outputEntry[rule.Output] = rule.Assignee
+				if rule.AddFieldIf.Assignee != "" {
+					outputEntry[rule.AddFieldIf.Output] = rule.AddFieldIf.Assignee
 				} else {
-					outputEntry[rule.Output] = outputEntry[rule.Input]
+					outputEntry[rule.AddFieldIf.Output] = outputEntry[rule.AddFieldIf.Input]
 				}
-				outputEntry[rule.Output+"_Evaluate"] = true
+				outputEntry[rule.AddFieldIf.Output+"_Evaluate"] = true
 			}
-		case api.TransformFilterOperationName("AddLabel"):
-			labels[rule.Input], _ = utils.ConvertToString(rule.Value)
-		case api.TransformFilterOperationName("AddLabelIf"):
+		case api.AddLabel:
+			labels[rule.AddLabel.Input], _ = utils.ConvertToString(rule.AddLabel.Value)
+		case api.AddLabelIf:
 			// TODO perhaps add a cache of previously evaluated expressions
-			expressionString := fmt.Sprintf("val %s", rule.Parameters)
+			expressionString := fmt.Sprintf("val %s", rule.AddLabelIf.Parameters)
 			expression, err := govaluate.NewEvaluableExpression(expressionString)
 			if err != nil {
 				log.Warningf("Can't evaluate AddLabelIf rule: %+v expression: %v. err %v", rule, expressionString, err)
 				continue
 			}
-			result, evaluateErr := expression.Evaluate(map[string]interface{}{"val": outputEntry[rule.Input]})
+			result, evaluateErr := expression.Evaluate(map[string]interface{}{"val": outputEntry[rule.AddLabelIf.Input]})
 			if evaluateErr == nil && result.(bool) {
-				labels[rule.Output] = rule.Assignee
+				labels[rule.AddLabelIf.Output] = rule.AddLabelIf.Assignee
 			}
 		default:
 			tlog.Panicf("unknown type %s for transform.Filter rule: %v", rule.Type, rule)
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_generic.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_generic.go
index e43f6ea7773b665232dc2a237b29079f24d7f563..c1f2e4dcf59a320890a3a71cff91e64d61852930 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_generic.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_generic.go
@@ -26,7 +26,7 @@ import (
 var glog = logrus.WithField("component", "transform.Generic")
 
 type Generic struct {
-	policy string
+	policy api.TransformGenericOperationEnum
 	rules  []api.GenericTransformRule
 }
 
@@ -96,7 +96,7 @@ func NewTransformGeneric(params config.StageParam) (Transformer, error) {
 	rules := genConfig.Rules
 	policy := genConfig.Policy
 	switch policy {
-	case "replace_keys", "preserve_original_keys", "":
+	case api.ReplaceKeys, api.PreserveOriginalKeys, "":
 		// valid; nothing to do
 		glog.Infof("NewTransformGeneric, policy = %s", policy)
 	default:
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network.go
index a92be790e87127d974918ed9ee7eb5fb2d07d5df..f51b1717601f704d30d4ce86c168fa11626f8bf3 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network.go
@@ -47,6 +47,7 @@ type subnetCategory struct {
 	name  string
 }
 
+//nolint:cyclop
 func (n *Network) Transform(inputEntry config.GenericMap) (config.GenericMap, bool) {
 	// copy input entry before transform to avoid alteration on parallel stages
 	outputEntry := inputEntry.Copy()
@@ -54,7 +55,7 @@ func (n *Network) Transform(inputEntry config.GenericMap) (config.GenericMap, bo
 	// TODO: for efficiency and maintainability, maybe each case in the switch below should be an individual implementation of Transformer
 	for _, rule := range n.Rules {
 		switch rule.Type {
-		case api.OpAddSubnet:
+		case api.NetworkAddSubnet:
 			if rule.AddSubnet == nil {
 				log.Errorf("Missing add subnet configuration")
 				continue
@@ -65,13 +66,13 @@ func (n *Network) Transform(inputEntry config.GenericMap) (config.GenericMap, bo
 				continue
 			}
 			outputEntry[rule.AddSubnet.Output] = ipv4Net.String()
-		case api.OpAddLocation:
+		case api.NetworkAddLocation:
 			if rule.AddLocation == nil {
 				log.Errorf("Missing add location configuration")
 				continue
 			}
 			var locationInfo *location.Info
-			err, locationInfo := location.GetLocation(fmt.Sprintf("%s", outputEntry[rule.AddLocation.Input]))
+			locationInfo, err := location.GetLocation(fmt.Sprintf("%s", outputEntry[rule.AddLocation.Input]))
 			if err != nil {
 				log.Warningf("Can't find location for IP %v err %v", outputEntry[rule.AddLocation.Input], err)
 				continue
@@ -82,7 +83,7 @@ func (n *Network) Transform(inputEntry config.GenericMap) (config.GenericMap, bo
 			outputEntry[rule.AddLocation.Output+"_CityName"] = locationInfo.CityName
 			outputEntry[rule.AddLocation.Output+"_Latitude"] = locationInfo.Latitude
 			outputEntry[rule.AddLocation.Output+"_Longitude"] = locationInfo.Longitude
-		case api.OpAddService:
+		case api.NetworkAddService:
 			if rule.AddService == nil {
 				log.Errorf("Missing add service configuration")
 				continue
@@ -109,17 +110,17 @@ func (n *Network) Transform(inputEntry config.GenericMap) (config.GenericMap, bo
 				}
 			}
 			outputEntry[rule.AddService.Output] = serviceName
-		case api.OpAddKubernetes:
+		case api.NetworkAddKubernetes:
 			kubernetes.Enrich(outputEntry, *rule.Kubernetes)
-		case api.OpAddKubernetesInfra:
+		case api.NetworkAddKubernetesInfra:
 			if rule.KubernetesInfra == nil {
 				logrus.Error("transformation rule: Missing configuration ")
 				continue
 			}
-			kubernetes.EnrichLayer(outputEntry, *rule.KubernetesInfra)
-		case api.OpReinterpretDirection:
+			kubernetes.EnrichLayer(outputEntry, rule.KubernetesInfra)
+		case api.NetworkReinterpretDirection:
 			reinterpretDirection(outputEntry, &n.DirectionInfo)
-		case api.OpAddIPCategory:
+		case api.NetworkAddIPCategory:
 			if rule.AddIPCategory == nil {
 				logrus.Error("AddIPCategory rule: Missing configuration ")
 				continue
@@ -155,6 +156,8 @@ func (n *Network) categorizeIP(ip net.IP) string {
 }
 
 // NewTransformNetwork create a new transform
+//
+//nolint:cyclop
 func NewTransformNetwork(params config.StageParam) (Transformer, error) {
 	var needToInitLocationDB = false
 	var needToInitKubeData = false
@@ -166,22 +169,23 @@ func NewTransformNetwork(params config.StageParam) (Transformer, error) {
 	}
 	for _, rule := range jsonNetworkTransform.Rules {
 		switch rule.Type {
-		case api.OpAddLocation:
+		case api.NetworkAddLocation:
 			needToInitLocationDB = true
-		case api.OpAddKubernetes:
+		case api.NetworkAddKubernetes:
 			needToInitKubeData = true
-		case api.OpAddKubernetesInfra:
+		case api.NetworkAddKubernetesInfra:
 			needToInitKubeData = true
-		case api.OpAddService:
+		case api.NetworkAddService:
 			needToInitNetworkServices = true
-		case api.OpReinterpretDirection:
+		case api.NetworkReinterpretDirection:
 			if err := validateReinterpretDirectionConfig(&jsonNetworkTransform.DirectionInfo); err != nil {
 				return nil, err
 			}
-		case api.OpAddIPCategory:
+		case api.NetworkAddIPCategory:
 			if len(jsonNetworkTransform.IPCategories) == 0 {
-				return nil, fmt.Errorf("a rule '%s' was found, but there are no IP categories configured", api.OpAddIPCategory)
+				return nil, fmt.Errorf("a rule '%s' was found, but there are no IP categories configured", api.NetworkAddIPCategory)
 			}
+		case api.NetworkAddSubnet:
 		}
 	}
 
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network_direction.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network_direction.go
index f4c55f6876c83fa534d7a2a6e9082522ee3ade6d..5f466088fb952a499da97eee016804212aff1218 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network_direction.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network_direction.go
@@ -15,16 +15,16 @@ const (
 
 func validateReinterpretDirectionConfig(info *api.NetworkTransformDirectionInfo) error {
 	if info.FlowDirectionField == "" {
-		return fmt.Errorf("invalid config for transform.Network rule %s: missing FlowDirectionField", api.OpReinterpretDirection)
+		return fmt.Errorf("invalid config for transform.Network rule %s: missing FlowDirectionField", api.NetworkReinterpretDirection)
 	}
 	if info.ReporterIPField == "" {
-		return fmt.Errorf("invalid config for transform.Network rule %s: missing ReporterIPField", api.OpReinterpretDirection)
+		return fmt.Errorf("invalid config for transform.Network rule %s: missing ReporterIPField", api.NetworkReinterpretDirection)
 	}
 	if info.SrcHostField == "" {
-		return fmt.Errorf("invalid config for transform.Network rule %s: missing SrcHostField", api.OpReinterpretDirection)
+		return fmt.Errorf("invalid config for transform.Network rule %s: missing SrcHostField", api.NetworkReinterpretDirection)
 	}
 	if info.DstHostField == "" {
-		return fmt.Errorf("invalid config for transform.Network rule %s: missing DstHostField", api.OpReinterpretDirection)
+		return fmt.Errorf("invalid config for transform.Network rule %s: missing DstHostField", api.NetworkReinterpretDirection)
 	}
 	return nil
 }
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/sasl.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/sasl.go
index aef69a19b01bd4956373a72a99086224e8d489e3..06d0c75d29b97b5164752ec53f35ed2ab47fe181 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/sasl.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/sasl.go
@@ -17,7 +17,7 @@ func SetupSASLMechanism(cfg *api.SASLConfig) (sasl.Mechanism, error) {
 	if err != nil {
 		return nil, err
 	}
-	strId := strings.TrimSpace(string(id))
+	strID := strings.TrimSpace(string(id))
 	// Read password
 	pwd, err := os.ReadFile(cfg.ClientSecretPath)
 	if err != nil {
@@ -26,12 +26,12 @@ func SetupSASLMechanism(cfg *api.SASLConfig) (sasl.Mechanism, error) {
 	strPwd := strings.TrimSpace(string(pwd))
 	var mechanism sasl.Mechanism
 	switch cfg.Type {
-	case api.SASLTypeName("Plain"):
-		mechanism = plain.Mechanism{Username: strId, Password: strPwd}
-	case api.SASLTypeName("ScramSHA512"):
-		mechanism, err = scram.Mechanism(scram.SHA512, strId, strPwd)
+	case api.SASLPlain:
+		mechanism = plain.Mechanism{Username: strID, Password: strPwd}
+	case api.SASLScramSHA512:
+		mechanism, err = scram.Mechanism(scram.SHA512, strID, strPwd)
 	default:
-		return nil, fmt.Errorf("Unknown SASL type: %s", cfg.Type)
+		return nil, fmt.Errorf("unknown SASL type: %s", cfg.Type)
 	}
 	if err != nil {
 		return nil, err
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/timed_cache.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/timed_cache.go
index d8e3e9819b690d7018035849b1bc11181a5a9b66..df945f4b63ea4e69f9b22e930be9b389ff0790ac 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/timed_cache.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/timed_cache.go
@@ -59,16 +59,15 @@ func (tc *TimedCache) GetCacheEntry(key string) (interface{}, bool) {
 	cEntry, ok := tc.cacheMap[key]
 	if ok {
 		return cEntry.SourceEntry, ok
-	} else {
-		return nil, ok
 	}
+	return nil, ok
 }
 
 var uclog = log.WithField("method", "UpdateCacheEntry")
 
 // If cache entry exists, update it and return it; if it does not exist, create it if there is room.
 // If we exceed the size of the cache, then do not allocate new entry
-func (tc *TimedCache) UpdateCacheEntry(key string, entry interface{}) (*cacheEntry, bool) {
+func (tc *TimedCache) UpdateCacheEntry(key string, entry interface{}) bool {
 	nowInSecs := time.Now()
 	tc.mu.Lock()
 	defer tc.mu.Unlock()
@@ -81,7 +80,7 @@ func (tc *TimedCache) UpdateCacheEntry(key string, entry interface{}) (*cacheEnt
 	} else {
 		// create new entry for cache
 		if (tc.maxEntries > 0) && (tc.cacheList.Len() >= tc.maxEntries) {
-			return nil, false
+			return false
 		}
 		cEntry = &cacheEntry{
 			lastUpdatedTime: nowInSecs,
@@ -96,7 +95,7 @@ func (tc *TimedCache) UpdateCacheEntry(key string, entry interface{}) (*cacheEnt
 			tc.cacheLenMetric.Inc()
 		}
 	}
-	return cEntry, true
+	return true
 }
 
 func (tc *TimedCache) GetCacheLen() int {
@@ -174,7 +173,7 @@ func NewQuietExpiringTimedCache(expiry time.Duration) *TimedCache {
 			case <-ExitChannel():
 				return
 			case <-ticker.C:
-				l.CleanupExpiredEntries(expiry, func(entry interface{}) {})
+				l.CleanupExpiredEntries(expiry, func(_ interface{}) {})
 			}
 		}
 	}()
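
With the entry pointer dropped from the return values, UpdateCacheEntry now only reports whether the entry was stored or rejected because the cache is full. A minimal sketch of the new call shape (hypothetical caller and variable names, not part of this patch):

	if ok := cache.UpdateCacheEntry(key, flowEntry); !ok {
		log.Debugf("cache at capacity (%d entries), dropping key %s", cache.GetCacheLen(), key)
	}
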
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write.go
index da05d22070a6e1af704c34a5a4d6b5aa6f14133a..2f6ed477519825cbaf9013885d17e192e5d0eb8e 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write.go
@@ -27,21 +27,21 @@ import (
 type Writer interface {
 	Write(in config.GenericMap)
 }
-type WriteNone struct {
+type None struct {
 	// synchronized access to avoid race conditions
 	mt          sync.Mutex
 	prevRecords []config.GenericMap
 }
 
 // Write writes entries
-func (t *WriteNone) Write(in config.GenericMap) {
+func (t *None) Write(in config.GenericMap) {
 	logrus.Debugf("entering Write none, in = %v", in)
 	t.mt.Lock()
 	t.prevRecords = append(t.prevRecords, in)
 	t.mt.Unlock()
 }
 
-func (t *WriteNone) PrevRecords() []config.GenericMap {
+func (t *None) PrevRecords() []config.GenericMap {
 	t.mt.Lock()
 	defer t.mt.Unlock()
 	var copies []config.GenericMap
@@ -53,5 +53,5 @@ func (t *WriteNone) PrevRecords() []config.GenericMap {
 
 // NewWriteNone create a new write
 func NewWriteNone() (Writer, error) {
-	return &WriteNone{}, nil
+	return &None{}, nil
 }
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_fake.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_fake.go
index 518573a1c83b2a856b07da3025d2513e3771d2a7..362415e7b99c42fb6a0ef340c3832accd5312e28 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_fake.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_fake.go
@@ -24,21 +24,21 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
-type WriteFake struct {
+type Fake struct {
 	// access is locked and copied to avoid race condition errors during tests
 	mt         sync.Mutex
 	allRecords []config.GenericMap
 }
 
 // Write stores in memory all records.
-func (w *WriteFake) Write(in config.GenericMap) {
+func (w *Fake) Write(in config.GenericMap) {
 	logrus.Trace("entering writeFake Write")
 	w.mt.Lock()
 	w.allRecords = append(w.allRecords, in.Copy())
 	w.mt.Unlock()
 }
 
-func (w *WriteFake) AllRecords() []config.GenericMap {
+func (w *Fake) AllRecords() []config.GenericMap {
 	w.mt.Lock()
 	defer w.mt.Unlock()
 	var copies []config.GenericMap
@@ -51,6 +51,6 @@ func (w *WriteFake) AllRecords() []config.GenericMap {
 // NewWriteFake creates a new write.
 func NewWriteFake(_ config.StageParam) (Writer, error) {
 	logrus.Debugf("entering NewWriteFake")
-	w := &WriteFake{}
+	w := &Fake{}
 	return w, nil
 }
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_ipfix.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_ipfix.go
index cf984a26dd9b5a99edc6b56921cba37d1a324e24..fd75575ad627300251e166a5d1125a4cfac80f17 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_ipfix.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_ipfix.go
@@ -20,6 +20,8 @@ package write
 import (
 	"fmt"
 	"net"
+	"strconv"
+	"strings"
 
 	"github.com/netobserv/flowlogs-pipeline/pkg/api"
 	"github.com/netobserv/flowlogs-pipeline/pkg/config"
@@ -40,8 +42,16 @@ type writeIpfix struct {
 	entitiesV6         []entities.InfoElementWithValue
 }
 
+type FieldMap struct {
+	Key      string
+	Getter   func(entities.InfoElementWithValue) any
+	Setter   func(entities.InfoElementWithValue, any)
+	Matcher  func(entities.InfoElementWithValue, any) bool
+	Optional bool
+}
+
 // IPv6Type value as defined in IEEE 802: https://www.iana.org/assignments/ieee-802-numbers/ieee-802-numbers.xhtml
-const IPv6Type = 0x86DD
+const IPv6Type uint16 = 0x86DD
 
 var (
 	ilog       = logrus.WithField("component", "write.Ipfix")
@@ -78,6 +88,191 @@ var (
 	}
 	CustomNetworkFields = []string{
 		"timeFlowRttNs",
+		"interfaces",
+		"directions",
+	}
+
+	MapIPFIXKeys = map[string]FieldMap{
+		"sourceIPv4Address": {
+			Key:    "SrcAddr",
+			Getter: func(elt entities.InfoElementWithValue) any { return elt.GetIPAddressValue().String() },
+			Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetIPAddressValue(net.ParseIP(rec.(string))) },
+		},
+		"destinationIPv4Address": {
+			Key:    "DstAddr",
+			Getter: func(elt entities.InfoElementWithValue) any { return elt.GetIPAddressValue().String() },
+			Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetIPAddressValue(net.ParseIP(rec.(string))) },
+		},
+		"sourceIPv6Address": {
+			Key:    "SrcAddr",
+			Getter: func(elt entities.InfoElementWithValue) any { return elt.GetIPAddressValue().String() },
+			Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetIPAddressValue(net.ParseIP(rec.(string))) },
+		},
+		"destinationIPv6Address": {
+			Key:    "DstAddr",
+			Getter: func(elt entities.InfoElementWithValue) any { return elt.GetIPAddressValue().String() },
+			Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetIPAddressValue(net.ParseIP(rec.(string))) },
+		},
+		"nextHeaderIPv6": {
+			Key:    "Proto",
+			Getter: func(elt entities.InfoElementWithValue) any { return elt.GetUnsigned8Value() },
+			Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned8Value(rec.(uint8)) },
+		},
+		"sourceMacAddress": {
+			Key: "SrcMac",
+			Setter: func(elt entities.InfoElementWithValue, rec any) {
+				elt.SetMacAddressValue(net.HardwareAddr(rec.(string)))
+			},
+			Matcher: func(_ entities.InfoElementWithValue, _ any) bool {
+				// Getting some discrepancies here, need to figure out why
+				return true
+			},
+		},
+		"destinationMacAddress": {
+			Key: "DstMac",
+			Setter: func(elt entities.InfoElementWithValue, rec any) {
+				elt.SetMacAddressValue(net.HardwareAddr(rec.(string)))
+			},
+			Matcher: func(_ entities.InfoElementWithValue, _ any) bool {
+				// Getting some discrepancies here, need to figure out why
+				return true
+			},
+		},
+		"ethernetType": {
+			Key:    "Etype",
+			Getter: func(elt entities.InfoElementWithValue) any { return elt.GetUnsigned16Value() },
+			Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned16Value(rec.(uint16)) },
+		},
+		"flowDirection": {
+			Key: "IfDirections",
+			Setter: func(elt entities.InfoElementWithValue, rec any) {
+				if dirs, ok := rec.([]int); ok && len(dirs) > 0 {
+					elt.SetUnsigned8Value(uint8(dirs[0]))
+				}
+			},
+			Matcher: func(elt entities.InfoElementWithValue, expected any) bool {
+				ifdirs := expected.([]int)
+				return int(elt.GetUnsigned8Value()) == ifdirs[0]
+			},
+		},
+		"directions": {
+			Key: "IfDirections",
+			Getter: func(elt entities.InfoElementWithValue) any {
+				var dirs []int
+				for _, dir := range strings.Split(elt.GetStringValue(), ",") {
+					d, _ := strconv.Atoi(dir)
+					dirs = append(dirs, d)
+				}
+				return dirs
+			},
+			Setter: func(elt entities.InfoElementWithValue, rec any) {
+				if dirs, ok := rec.([]int); ok && len(dirs) > 0 {
+					var asStr []string
+					for _, dir := range dirs {
+						asStr = append(asStr, strconv.Itoa(dir))
+					}
+					elt.SetStringValue(strings.Join(asStr, ","))
+				}
+			},
+		},
+		"protocolIdentifier": {
+			Key:    "Proto",
+			Getter: func(elt entities.InfoElementWithValue) any { return elt.GetUnsigned8Value() },
+			Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned8Value(rec.(uint8)) },
+		},
+		"sourceTransportPort": {
+			Key:    "SrcPort",
+			Getter: func(elt entities.InfoElementWithValue) any { return elt.GetUnsigned16Value() },
+			Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned16Value(rec.(uint16)) },
+		},
+		"destinationTransportPort": {
+			Key:    "DstPort",
+			Getter: func(elt entities.InfoElementWithValue) any { return elt.GetUnsigned16Value() },
+			Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned16Value(rec.(uint16)) },
+		},
+		"octetDeltaCount": {
+			Key:    "Bytes",
+			Getter: func(elt entities.InfoElementWithValue) any { return elt.GetUnsigned64Value() },
+			Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned64Value(rec.(uint64)) },
+		},
+		"flowStartMilliseconds": {
+			Key:    "TimeFlowStartMs",
+			Getter: func(elt entities.InfoElementWithValue) any { return int64(elt.GetUnsigned64Value()) },
+			Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned64Value(uint64(rec.(int64))) },
+		},
+		"flowEndMilliseconds": {
+			Key:    "TimeFlowEndMs",
+			Getter: func(elt entities.InfoElementWithValue) any { return int64(elt.GetUnsigned64Value()) },
+			Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned64Value(uint64(rec.(int64))) },
+		},
+		"packetDeltaCount": {
+			Key:    "Packets",
+			Getter: func(elt entities.InfoElementWithValue) any { return uint32(elt.GetUnsigned64Value()) },
+			Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned64Value(uint64(rec.(uint32))) },
+		},
+		"interfaceName": {
+			Key: "Interfaces",
+			Setter: func(elt entities.InfoElementWithValue, rec any) {
+				if ifs, ok := rec.([]string); ok && len(ifs) > 0 {
+					elt.SetStringValue(ifs[0])
+				}
+			},
+			Matcher: func(elt entities.InfoElementWithValue, expected any) bool {
+				ifs := expected.([]string)
+				return elt.GetStringValue() == ifs[0]
+			},
+		},
+		"interfaces": {
+			Key:    "Interfaces",
+			Getter: func(elt entities.InfoElementWithValue) any { return strings.Split(elt.GetStringValue(), ",") },
+			Setter: func(elt entities.InfoElementWithValue, rec any) {
+				if ifs, ok := rec.([]string); ok {
+					elt.SetStringValue(strings.Join(ifs, ","))
+				}
+			},
+		},
+		"sourcePodNamespace": {
+			Key:      "SrcK8S_Namespace",
+			Getter:   func(elt entities.InfoElementWithValue) any { return elt.GetStringValue() },
+			Setter:   func(elt entities.InfoElementWithValue, rec any) { elt.SetStringValue(rec.(string)) },
+			Optional: true,
+		},
+		"sourcePodName": {
+			Key:      "SrcK8S_Name",
+			Getter:   func(elt entities.InfoElementWithValue) any { return elt.GetStringValue() },
+			Setter:   func(elt entities.InfoElementWithValue, rec any) { elt.SetStringValue(rec.(string)) },
+			Optional: true,
+		},
+		"destinationPodNamespace": {
+			Key:      "DstK8S_Namespace",
+			Getter:   func(elt entities.InfoElementWithValue) any { return elt.GetStringValue() },
+			Setter:   func(elt entities.InfoElementWithValue, rec any) { elt.SetStringValue(rec.(string)) },
+			Optional: true,
+		},
+		"destinationPodName": {
+			Key:      "DstK8S_Name",
+			Getter:   func(elt entities.InfoElementWithValue) any { return elt.GetStringValue() },
+			Setter:   func(elt entities.InfoElementWithValue, rec any) { elt.SetStringValue(rec.(string)) },
+			Optional: true,
+		},
+		"sourceNodeName": {
+			Key:      "SrcK8S_HostName",
+			Getter:   func(elt entities.InfoElementWithValue) any { return elt.GetStringValue() },
+			Setter:   func(elt entities.InfoElementWithValue, rec any) { elt.SetStringValue(rec.(string)) },
+			Optional: true,
+		},
+		"destinationNodeName": {
+			Key:      "DstK8S_HostName",
+			Getter:   func(elt entities.InfoElementWithValue) any { return elt.GetStringValue() },
+			Setter:   func(elt entities.InfoElementWithValue, rec any) { elt.SetStringValue(rec.(string)) },
+			Optional: true,
+		},
+		"timeFlowRttNs": {
+			Key:      "TimeFlowRttNs",
+			Getter:   func(elt entities.InfoElementWithValue) any { return int64(elt.GetUnsigned64Value()) },
+			Setter:   func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned64Value(uint64(rec.(int64))) },
+			Optional: true,
+		},
 	}
 )
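
The MapIPFIXKeys table above replaces the old per-field switch statements with a declarative mapping from IPFIX information-element names to flow-record keys plus Getter/Setter/Matcher closures. A minimal sketch of that dispatch pattern, using a stand-in element type rather than the real go-ipfix entities API (fakeElement and fieldMapping are illustrative names only, not flowlogs-pipeline types):

```go
// Sketch of the map-driven dispatch idea behind MapIPFIXKeys.
// fakeElement stands in for entities.InfoElementWithValue.
package main

import "fmt"

type fakeElement struct {
	name string
	u16  uint16
}

type fieldMapping struct {
	Key    string
	Setter func(*fakeElement, any)
	Getter func(*fakeElement) any
}

var mappings = map[string]fieldMapping{
	"sourceTransportPort": {
		Key:    "SrcPort",
		Setter: func(e *fakeElement, v any) { e.u16 = v.(uint16) },
		Getter: func(e *fakeElement) any { return e.u16 },
	},
}

func main() {
	record := map[string]any{"SrcPort": uint16(443)}
	elt := &fakeElement{name: "sourceTransportPort"}
	// Look up the element by name and copy the record value into it.
	if m, ok := mappings[elt.name]; ok {
		if v := record[m.Key]; v != nil {
			m.Setter(elt, v)
		}
	}
	fmt.Println(elt.u16) // 443
}
```

The benefit over the previous switch is that adding a field is a single table entry shared by both the encode (Setter) and decode/test (Getter, Matcher) paths.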
 
@@ -155,6 +350,16 @@ func loadCustomRegistry(EnterpriseID uint32) error {
 		ilog.WithError(err).Errorf("Failed to register element")
 		return err
 	}
+	err = registry.PutInfoElement((*entities.NewInfoElement("interfaces", 7741, entities.String, EnterpriseID, 65535)), EnterpriseID)
+	if err != nil {
+		ilog.WithError(err).Errorf("Failed to register element")
+		return err
+	}
+	err = registry.PutInfoElement((*entities.NewInfoElement("directions", 7742, entities.String, EnterpriseID, 65535)), EnterpriseID)
+	if err != nil {
+		ilog.WithError(err).Errorf("Failed to register element")
+		return err
+	}
 	return nil
 }
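
loadCustomRegistry now registers the enterprise-specific string elements interfaces (7741) and directions (7742) with the same register-and-check boilerplate as the existing elements. A hedged sketch of how those repeated blocks could be folded into a table-driven loop; the import paths assume the vendored vmware/go-ipfix packages, and the helper name registerCustomElements is made up for illustration:

```go
// Illustrative only: table-driven registration of the custom elements.
// Import paths are assumed to be the vendored go-ipfix packages.
package ipfixsketch

import (
	"github.com/vmware/go-ipfix/pkg/entities"
	"github.com/vmware/go-ipfix/pkg/registry"
)

func registerCustomElements(enterpriseID uint32) error {
	custom := []struct {
		name string
		id   uint16
	}{
		{"interfaces", 7741},
		{"directions", 7742},
	}
	for _, c := range custom {
		// Same call shape as in loadCustomRegistry: a variable-length
		// string element scoped to the given enterprise ID.
		ie := entities.NewInfoElement(c.name, c.id, entities.String, enterpriseID, 65535)
		if err := registry.PutInfoElement(*ie, enterpriseID); err != nil {
			return err
		}
	}
	return nil
}
```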
 
@@ -236,173 +441,28 @@ func SendTemplateRecordv6(exporter *ipfixExporter.ExportingProcess, enrichEnterp
 	return templateID, elements, nil
 }
 
-func setStandardIEValue(record config.GenericMap, ieValPtr *entities.InfoElementWithValue) error {
+//nolint:cyclop
+func setElementValue(record config.GenericMap, ieValPtr *entities.InfoElementWithValue) error {
 	ieVal := *ieValPtr
-	switch ieVal.GetName() {
-	case "ethernetType":
-		if record["Etype"] != nil {
-			ieVal.SetUnsigned16Value(uint16(record["Etype"].(uint32)))
-		} else {
-			return fmt.Errorf("unable to find ethernet type (Etype) in record")
-		}
-	case "flowDirection":
-		dirs := record["IfDirections"].([]int)
-		if len(dirs) > 0 {
-			ieVal.SetUnsigned8Value(uint8(dirs[0]))
-		} else {
-			return fmt.Errorf("unable to find flow direction (flowDirection) in record")
-		}
-	case "sourceMacAddress":
-		if record["SrcMac"] != nil {
-			ieVal.SetMacAddressValue(net.HardwareAddr(record["SrcMac"].(string)))
-		} else {
-			return fmt.Errorf("unable to find source mac address (SrcMac) in record")
-		}
-	case "destinationMacAddress":
-		if record["DstMac"] != nil {
-			ieVal.SetMacAddressValue(net.HardwareAddr(record["DstMac"].(string)))
-		} else {
-			return fmt.Errorf("unable to find dest mac address (DstMac) in record")
-		}
-	case "sourceIPv4Address":
-		if record["SrcAddr"] != nil {
-			ieVal.SetIPAddressValue(net.ParseIP(record["SrcAddr"].(string)))
-		} else {
-			return fmt.Errorf("unable to find source IPv4 address (SrcAddr) in record")
-		}
-	case "destinationIPv4Address":
-		if record["DstAddr"] != nil {
-			ieVal.SetIPAddressValue(net.ParseIP(record["DstAddr"].(string)))
-		} else {
-			return fmt.Errorf("unable to find dest IPv4 address (DstAddr) in record")
-		}
-	case "sourceIPv6Address":
-		if record["SrcAddr"] != nil {
-			ieVal.SetIPAddressValue(net.ParseIP(record["SrcAddr"].(string)))
-		} else {
-			return fmt.Errorf("unable to find source IPv6 address (SrcAddr) in record")
-		}
-	case "destinationIPv6Address":
-		if record["DstAddr"] != nil {
-			ieVal.SetIPAddressValue(net.ParseIP(record["DstAddr"].(string)))
-		} else {
-			return fmt.Errorf("unable to find dest IPv6 address (DstAddr) in record")
-		}
-	case "protocolIdentifier":
-		if record["Proto"] != nil {
-			ieVal.SetUnsigned8Value(uint8(record["Proto"].(uint32)))
-		} else {
-			return fmt.Errorf("unable to find protocol identifier (Proto) in record")
-		}
-	case "nextHeaderIPv6":
-		if record["Proto"] != nil {
-			ieVal.SetUnsigned8Value(uint8(record["Proto"].(uint32)))
-		} else {
-			return fmt.Errorf("unable to find next header (Proto) in record")
-		}
-	case "sourceTransportPort":
-		if record["SrcPort"] != nil {
-			ieVal.SetUnsigned16Value(uint16(record["SrcPort"].(uint32)))
-		} else {
-			return fmt.Errorf("unable to find source port (SrcPort) in record")
-		}
-	case "destinationTransportPort":
-		if record["DstPort"] != nil {
-			ieVal.SetUnsigned16Value(uint16(record["DstPort"].(uint32)))
-		} else {
-			return fmt.Errorf("unable to find dest port (DstPort) in record")
-		}
-	case "octetDeltaCount":
-		if record["Bytes"] != nil {
-			ieVal.SetUnsigned64Value(record["Bytes"].(uint64))
-		} else {
-			return fmt.Errorf("unable to find bytes in record")
-		}
-	case "flowStartMilliseconds":
-		if record["TimeFlowStartMs"] != nil {
-			ieVal.SetUnsigned64Value(uint64(record["TimeFlowStartMs"].(int64)))
-		} else {
-			return fmt.Errorf("unable to find flow start time (TimeFlowStartMs) in record")
-		}
-	case "flowEndMilliseconds":
-		if record["TimeFlowEndMs"] != nil {
-			ieVal.SetUnsigned64Value(uint64(record["TimeFlowEndMs"].(int64)))
-		} else {
-			return fmt.Errorf("unable to find flow end time (TimeFlowEndMs) in record")
-		}
-	case "packetDeltaCount":
-		if record["Packets"] != nil {
-			ieVal.SetUnsigned64Value(record["Packets"].(uint64))
-		} else {
-			return fmt.Errorf("unable to find packets in record")
-		}
-	case "interfaceName":
-		interfaces := record["Interfaces"].([]string)
-		if len(interfaces) > 0 {
-			ieVal.SetStringValue(interfaces[0])
-		} else {
-			return fmt.Errorf("unable to find interface in record")
-		}
-	case "timeFlowRttNs":
-		if record["TimeFlowRttNs"] != nil {
-			ieVal.SetUnsigned64Value(uint64(record["TimeFlowRttNs"].(int64)))
-		} else {
-			return fmt.Errorf("unable to find timeflowrtt in record")
-		}
+	name := ieVal.GetName()
+	mapping, ok := MapIPFIXKeys[name]
+	if !ok {
+		return nil
+	}
+	if value := record[mapping.Key]; value != nil {
+		mapping.Setter(ieVal, value)
+	} else if !mapping.Optional {
+		return fmt.Errorf("unable to find %s (%s) in record", name, mapping.Key)
 	}
 	return nil
 }
 
-func setKubeIEValue(record config.GenericMap, ieValPtr *entities.InfoElementWithValue) {
-	ieVal := *ieValPtr
-	switch ieVal.GetName() {
-	case "sourcePodNamespace":
-		if record["SrcK8S_Namespace"] != nil {
-			ieVal.SetStringValue(record["SrcK8S_Namespace"].(string))
-		} else {
-			ieVal.SetStringValue("none")
-		}
-	case "sourcePodName":
-		if record["SrcK8S_Name"] != nil {
-			ieVal.SetStringValue(record["SrcK8S_Name"].(string))
-		} else {
-			ieVal.SetStringValue("none")
-		}
-	case "destinationPodNamespace":
-		if record["DstK8S_Namespace"] != nil {
-			ieVal.SetStringValue(record["DstK8S_Namespace"].(string))
-		} else {
-			ieVal.SetStringValue("none")
-		}
-	case "destinationPodName":
-		if record["DstK8S_Name"] != nil {
-			ieVal.SetStringValue(record["DstK8S_Name"].(string))
-		} else {
-			ieVal.SetStringValue("none")
-		}
-	case "sourceNodeName":
-		if record["SrcK8S_HostName"] != nil {
-			ieVal.SetStringValue(record["SrcK8S_HostName"].(string))
-		} else {
-			ieVal.SetStringValue("none")
-		}
-	case "destinationNodeName":
-		if record["DstK8S_HostName"] != nil {
-			ieVal.SetStringValue(record["DstK8S_HostName"].(string))
-		} else {
-			ieVal.SetStringValue("none")
-		}
-	}
-}
-func setEntities(record config.GenericMap, enrichEnterpriseID uint32, elements *[]entities.InfoElementWithValue) error {
+func setEntities(record config.GenericMap, elements *[]entities.InfoElementWithValue) error {
 	for _, ieVal := range *elements {
-		err := setStandardIEValue(record, &ieVal)
+		err := setElementValue(record, &ieVal)
 		if err != nil {
 			return err
 		}
-		if enrichEnterpriseID != 0 {
-			setKubeIEValue(record, &ieVal)
-		}
 	}
 	return nil
 }
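
With setKubeIEValue gone, setElementValue drives both the standard and the Kubernetes-enrichment fields from MapIPFIXKeys: unknown element names are skipped, and a missing value is an error unless the mapping is marked Optional. A self-contained sketch of that required-versus-Optional behavior, with simplified stand-in types rather than the flowlogs-pipeline API:

```go
// Sketch of the Optional semantics in setElementValue: required keys that
// are absent produce an error, Optional ones are silently skipped.
package main

import "fmt"

type mapping struct {
	Key      string
	Optional bool
}

func setOne(record map[string]any, m mapping) error {
	if v := record[m.Key]; v != nil {
		fmt.Printf("set %s = %v\n", m.Key, v)
		return nil
	}
	if m.Optional {
		return nil // enrichment field missing: ignore
	}
	return fmt.Errorf("unable to find %s in record", m.Key)
}

func main() {
	rec := map[string]any{"Bytes": uint64(1500)}
	fmt.Println(setOne(rec, mapping{Key: "Bytes"}))                       // <nil>
	fmt.Println(setOne(rec, mapping{Key: "SrcK8S_Name", Optional: true})) // <nil>
	fmt.Println(setOne(rec, mapping{Key: "SrcPort"}))                     // error
}
```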
@@ -411,13 +471,13 @@ func (t *writeIpfix) sendDataRecord(record config.GenericMap, v6 bool) error {
 	var templateID uint16
 	if v6 {
 		templateID = t.templateIDv6
-		err := setEntities(record, t.enrichEnterpriseID, &t.entitiesV6)
+		err := setEntities(record, &t.entitiesV6)
 		if err != nil {
 			return err
 		}
 	} else {
 		templateID = t.templateIDv4
-		err := setEntities(record, t.enrichEnterpriseID, &t.entitiesV4)
+		err := setEntities(record, &t.entitiesV4)
 		if err != nil {
 			return err
 		}
@@ -447,7 +507,7 @@ func (t *writeIpfix) sendDataRecord(record config.GenericMap, v6 bool) error {
 // Write writes a flow before being stored
 func (t *writeIpfix) Write(entry config.GenericMap) {
 	ilog.Tracef("entering writeIpfix Write")
-	if IPv6Type == entry["Etype"].(uint32) {
+	if IPv6Type == entry["Etype"].(uint16) {
 		err := t.sendDataRecord(entry, true)
 		if err != nil {
 			ilog.WithError(err).Error("Failed in send v6 IPFIX record")
@@ -458,7 +518,6 @@ func (t *writeIpfix) Write(entry config.GenericMap) {
 			ilog.WithError(err).Error("Failed in send v4 IPFIX record")
 		}
 	}
-
 }
 
 // NewWriteIpfix creates a new write
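
The Write entry point above now asserts entry["Etype"].(uint16) instead of uint32, matching the narrower type the updated pipeline stores for the EtherType field. A direct assertion still panics if a record ever carries Etype with another type; the sketch below shows a comma-ok guard as a defensive alternative (illustration only, not what the patch does; isIPv6 and ipv6Type are placeholder names, 0x86DD is the IPv6 EtherType):

```go
// Defensive variant of the Etype check using a comma-ok type assertion.
package main

import "fmt"

const ipv6Type uint16 = 0x86DD

func isIPv6(entry map[string]any) bool {
	etype, ok := entry["Etype"].(uint16)
	if !ok {
		// A mismatched type is reported instead of panicking.
		fmt.Printf("unexpected Etype type: %T\n", entry["Etype"])
		return false
	}
	return etype == ipv6Type
}

func main() {
	fmt.Println(isIPv6(map[string]any{"Etype": uint16(0x86DD)})) // true
	fmt.Println(isIPv6(map[string]any{"Etype": uint32(0x0800)})) // logs mismatch, false
}
```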
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_loki.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_loki.go
index fcaaa5fe5e72ce3d9d53213aca3700b4584c1dfb..8b46db9b9efec80a14a37dfc0af1afb0fc7c8f6a 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_loki.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_loki.go
@@ -64,22 +64,22 @@ type Loki struct {
 func buildLokiConfig(c *api.WriteLoki) (loki.Config, error) {
 	batchWait, err := time.ParseDuration(c.BatchWait)
 	if err != nil {
-		return loki.Config{}, fmt.Errorf("failed in parsing BatchWait : %v", err)
+		return loki.Config{}, fmt.Errorf("failed in parsing BatchWait : %w", err)
 	}
 
 	timeout, err := time.ParseDuration(c.Timeout)
 	if err != nil {
-		return loki.Config{}, fmt.Errorf("failed in parsing Timeout : %v", err)
+		return loki.Config{}, fmt.Errorf("failed in parsing Timeout : %w", err)
 	}
 
 	minBackoff, err := time.ParseDuration(c.MinBackoff)
 	if err != nil {
-		return loki.Config{}, fmt.Errorf("failed in parsing MinBackoff : %v", err)
+		return loki.Config{}, fmt.Errorf("failed in parsing MinBackoff : %w", err)
 	}
 
 	maxBackoff, err := time.ParseDuration(c.MaxBackoff)
 	if err != nil {
-		return loki.Config{}, fmt.Errorf("failed in parsing MaxBackoff : %v", err)
+		return loki.Config{}, fmt.Errorf("failed in parsing MaxBackoff : %w", err)
 	}
 
 	cfg := loki.Config{
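
Switching these fmt.Errorf calls from %v to %w keeps the underlying time.ParseDuration error in the chain, so callers of buildLokiConfig can still inspect it with errors.Is/errors.As instead of matching strings. A standalone example of the difference, using strconv for a concrete unwrappable error type (parsePort is illustrative, not FLP code):

```go
// Demonstrates why %w matters: the wrapped error remains reachable
// through errors.As; with %v it would be flattened into a string.
package main

import (
	"errors"
	"fmt"
	"strconv"
)

func parsePort(s string) (int, error) {
	p, err := strconv.Atoi(s)
	if err != nil {
		return 0, fmt.Errorf("failed in parsing port : %w", err)
	}
	return p, nil
}

func main() {
	_, err := parsePort("abc")
	var numErr *strconv.NumError
	fmt.Println(errors.As(err, &numErr)) // true; with %v this would be false
}
```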
diff --git a/vendor/modules.txt b/vendor/modules.txt
index ef7e5b27514328a0beb24756a7e09fab88415b62..3a14d890a7331ce4caabf04204e5b710d87e3239 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -237,8 +237,8 @@ github.com/mwitkow/go-conntrack
 # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f
 ## explicit
 github.com/mxk/go-flowrate/flowrate
-# github.com/netobserv/flowlogs-pipeline v0.1.12-0.20240312115357-ddc1f67022a5
-## explicit; go 1.20
+# github.com/netobserv/flowlogs-pipeline v0.1.12-0.20240325100124-fd783b283c7c
+## explicit; go 1.21
 github.com/netobserv/flowlogs-pipeline/pkg/api
 github.com/netobserv/flowlogs-pipeline/pkg/config
 github.com/netobserv/flowlogs-pipeline/pkg/operational