diff --git a/bpf/configs.h b/bpf/configs.h
new file mode 100644
index 0000000000000000000000000000000000000000..208faea1f4b3e96bce3f11b5dab2b7092a8d5bfb
--- /dev/null
+++ b/bpf/configs.h
@@ -0,0 +1,9 @@
+
+#ifndef __CONFIGS_H__
+#define __CONFIGS_H__
+
+// Constant definitions, to be overridden by the invoker
+volatile const u32 sampling = 0;
+volatile const u8 trace_messages = 0;
+
+#endif //__CONFIGS_H__
diff --git a/bpf/dns_tracker.h b/bpf/dns_tracker.h
new file mode 100644
index 0000000000000000000000000000000000000000..f3aa795f37f65d1d68b7fee4b92b6e32aff738b2
--- /dev/null
+++ b/bpf/dns_tracker.h
@@ -0,0 +1,103 @@
+/*
+    Lightweight DNS tracker using tracepoints.
+*/
+
+#ifndef __DNS_TRACKER_H__
+#define __DNS_TRACKER_H__
+#include "utils.h"
+
+#define DNS_PORT        53
+#define DNS_QR_FLAG     0x8000
+#define UDP_MAXMSG      512
+
+struct dns_header {
+    u16 id;
+    u16 flags;
+    u16 qdcount;
+    u16 ancount;
+    u16 nscount;
+    u16 arcount;
+};
+
+static inline void find_or_create_dns_flow(flow_id *id, struct dns_header *dns, int len, int dir, u16 flags) {
+    flow_metrics *aggregate_flow = bpf_map_lookup_elem(&aggregated_flows, id);
+    u64 current_time = bpf_ktime_get_ns();
+    // net_dev_queue trace point hook will run before TC hooks, so the flow shouldn't exist; if it does,
+    // that indicates we have a stale DNS query/response or in the middle of TCP flow so we will do nothing
+    if (aggregate_flow == NULL) {
+        // there is no matching flow, so let's create a new one and record the DNS info
+         flow_metrics new_flow;
+         __builtin_memset(&new_flow, 0, sizeof(new_flow));
+         new_flow.start_mono_time_ts = current_time;
+         new_flow.end_mono_time_ts = current_time;
+         new_flow.packets = 1;
+         new_flow.bytes = len;
+         new_flow.flags = flags;
+         new_flow.dns_record.id = bpf_ntohs(dns->id);
+         new_flow.dns_record.flags = bpf_ntohs(dns->flags);
+        if (dir == EGRESS) {
+            new_flow.dns_record.req_mono_time_ts = current_time;
+        } else {
+            new_flow.dns_record.rsp_mono_time_ts = current_time;
+        }
+        bpf_map_update_elem(&aggregated_flows, id, &new_flow, BPF_ANY);
+    }
+}
+
+static inline int trace_dns(struct sk_buff *skb) {
+    flow_id id;
+    u8 protocol = 0;
+    u16 family = 0,flags = 0, len = 0;
+
+    __builtin_memset(&id, 0, sizeof(id));
+
+    id.if_index = skb->skb_iif;
+
+    // read L2 info
+    set_key_with_l2_info(skb, &id, &family);
+
+    // read L3 info
+    set_key_with_l3_info(skb, family, &id, &protocol);
+
+    switch (protocol) {
+    case IPPROTO_UDP:
+        len = set_key_with_udp_info(skb, &id, IPPROTO_UDP);
+        // make sure udp payload doesn't exceed max msg size
+        if (len - sizeof(struct udphdr) > UDP_MAXMSG) {
+            return -1;
+        }
+        // set the length to udp hdr size as it will be used below to locate dns header
+        len = sizeof(struct udphdr);
+        break;
+    case IPPROTO_TCP:
+        len = set_key_with_tcp_info(skb, &id, IPPROTO_TCP, &flags);
+        break;
+    default:
+        return -1;
+    }
+
+    // check for DNS packets
+    if (id.dst_port == DNS_PORT || id.src_port == DNS_PORT) {
+        struct dns_header dns;
+        bpf_probe_read(&dns, sizeof(dns), (struct dns_header *)(skb->head + skb->transport_header + len));
+        if ((bpf_ntohs(dns.flags) & DNS_QR_FLAG) == 0) { /* dns query */
+            id.direction = EGRESS;
+        } else { /* dns response */
+            id.direction = INGRESS;
+        } // end of dns response
+        find_or_create_dns_flow(&id, &dns, skb->len, id.direction, flags);
+    } // end of dns port check
+
+    return 0;
+}
+
+SEC("tracepoint/net/net_dev_queue")
+int trace_net_packets(struct trace_event_raw_net_dev_template *args) {
+    struct sk_buff skb;
+
+    __builtin_memset(&skb, 0, sizeof(skb));
+    bpf_probe_read(&skb, sizeof(struct sk_buff), args->skbaddr);
+    return trace_dns(&skb);
+}
+
+#endif // __DNS_TRACKER_H__
diff --git a/bpf/flow.h b/bpf/flow.h
index 366a686b754077851b363245768a21c82772f928..ddb306f8ee0d566f376b9e63a377df8b535533c7 100644
--- a/bpf/flow.h
+++ b/bpf/flow.h
@@ -10,6 +10,8 @@ typedef __u16 u16;
 typedef __u32 u32;
 typedef __u64 u64;
 
+#define AF_INET  2
+#define AF_INET6 10
 #define ETH_ALEN 6
 #define ETH_P_IP 0x0800
 #define ETH_P_IPV6 0x86DD
@@ -30,8 +32,24 @@ typedef struct flow_metrics_t {
     // 0 otherwise
     // https://chromium.googlesource.com/chromiumos/docs/+/master/constants/errnos.md
     u8 errno;
+    struct tcp_drops_t {
+        u32 packets;
+        u64 bytes;
+        u16 latest_flags;
+        u8 latest_state;
+        u32 latest_drop_cause;
+    } __attribute__((packed)) tcp_drops;
+    struct dns_record_t {
+        u16 id;
+        u16 flags;
+        u64 req_mono_time_ts;
+        u64 rsp_mono_time_ts;
+    } __attribute__((packed)) dns_record;
 } __attribute__((packed)) flow_metrics;
 
+// Force emitting struct tcp_drops into the ELF.
+const struct tcp_drops_t *unused0 __attribute__((unused));
+
 // Force emitting struct flow_metrics into the ELF.
 const struct flow_metrics_t *unused1 __attribute__((unused));
 
@@ -71,4 +89,8 @@ typedef struct flow_record_t {
 
 // Force emitting struct flow_record into the ELF.
 const struct flow_record_t *unused3 __attribute__((unused));
+
+// Force emitting struct dns_record into the ELF.
+const struct dns_record_t *unused4 __attribute__((unused));
+
 #endif
diff --git a/bpf/flows.c b/bpf/flows.c
index ba934557f19cddfcdb57bee22168f69dec983007..5219b808d777c8249f1136ffea33a5e04f7a2c02 100644
--- a/bpf/flows.c
+++ b/bpf/flows.c
@@ -13,226 +13,10 @@
             until an entry is available.
         4) When hash collision is detected, we send the new entry to userpace via ringbuffer.
 */
-#include <vmlinux.h>
-#include <bpf_helpers.h>
+#include "utils.h"
+#include "tcp_drops.h"
+#include "dns_tracker.h"
 
-#include "flow.h"
-#define DISCARD 1
-#define SUBMIT 0
-
-// according to field 61 in https://www.iana.org/assignments/ipfix/ipfix.xhtml
-#define INGRESS 0
-#define EGRESS 1
-
-// Flags according to RFC 9293 & https://www.iana.org/assignments/ipfix/ipfix.xhtml
-#define FIN_FLAG 0x01
-#define SYN_FLAG 0x02
-#define RST_FLAG 0x04
-#define PSH_FLAG 0x08
-#define ACK_FLAG 0x10
-#define URG_FLAG 0x20
-#define ECE_FLAG 0x40
-#define CWR_FLAG 0x80
-// Custom flags exported
-#define SYN_ACK_FLAG 0x100
-#define FIN_ACK_FLAG 0x200
-#define RST_ACK_FLAG 0x400
-
-#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
-	__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-#define bpf_ntohs(x)		__builtin_bswap16(x)
-#define bpf_htons(x)		__builtin_bswap16(x)
-#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
-	__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-#define bpf_ntohs(x)		(x)
-#define bpf_htons(x)		(x)
-#else
-# error "Endianness detection needs to be set up for your compiler?!"
-#endif
-
-// Common Ringbuffer as a conduit for ingress/egress flows to userspace
-struct {
-    __uint(type, BPF_MAP_TYPE_RINGBUF);
-    __uint(max_entries, 1 << 24);
-} direct_flows SEC(".maps");
-
-// Key: the flow identifier. Value: the flow metrics for that identifier.
-struct {
-    __uint(type, BPF_MAP_TYPE_HASH);
-    __type(key, flow_id);
-    __type(value, flow_metrics);
-    __uint(max_entries, 1 << 24);
-    __uint(map_flags, BPF_F_NO_PREALLOC);
-} aggregated_flows SEC(".maps");
-
-// Constant definitions, to be overridden by the invoker
-volatile const u32 sampling = 0;
-volatile const u8 trace_messages = 0;
-
-const u8 ip4in6[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff};
-
-// sets the TCP header flags for connection information
-static inline void set_flags(struct tcphdr *th, u16 *flags) {
-    //If both ACK and SYN are set, then it is server -> client communication during 3-way handshake. 
-    if (th->ack && th->syn) {
-        *flags |= SYN_ACK_FLAG;
-    } else if (th->ack && th->fin ) {
-        // If both ACK and FIN are set, then it is graceful termination from server.
-        *flags |= FIN_ACK_FLAG;
-    } else if (th->ack && th->rst ) {
-        // If both ACK and RST are set, then it is abrupt connection termination. 
-        *flags |= RST_ACK_FLAG;
-    } else if (th->fin) {
-        *flags |= FIN_FLAG;
-    } else if (th->syn) {
-        *flags |= SYN_FLAG;
-    } else if (th->ack) {
-        *flags |= ACK_FLAG;
-    } else if (th->rst) {
-        *flags |= RST_FLAG;
-    } else if (th->psh) {
-        *flags |= PSH_FLAG;
-    } else if (th->urg) {
-        *flags |= URG_FLAG;
-    } else if (th->ece) {
-        *flags |= ECE_FLAG;
-    } else if (th->cwr) {
-        *flags |= CWR_FLAG;
-    }
-}
-
-// L4_info structure contains L4 headers parsed information.
-struct l4_info_t {
-    // TCP/UDP/SCTP source port in host byte order
-    u16 src_port;
-    // TCP/UDP/SCTP destination port in host byte order
-    u16 dst_port;
-    // ICMPv4/ICMPv6 type value
-    u8 icmp_type;
-    // ICMPv4/ICMPv6 code value
-    u8 icmp_code;
-    // TCP flags
-    u16 flags;
-};
-
-// Extract L4 info for the supported protocols
-static inline void fill_l4info(void *l4_hdr_start, void *data_end, u8 protocol,
-                               struct l4_info_t *l4_info) {
-	switch (protocol) {
-    case IPPROTO_TCP: {
-        struct tcphdr *tcp = l4_hdr_start;
-        if ((void *)tcp + sizeof(*tcp) <= data_end) {
-            l4_info->src_port = bpf_ntohs(tcp->source);
-            l4_info->dst_port = bpf_ntohs(tcp->dest);
-            set_flags(tcp, &l4_info->flags);
-        }
-    } break;
-    case IPPROTO_UDP: {
-        struct udphdr *udp = l4_hdr_start;
-        if ((void *)udp + sizeof(*udp) <= data_end) {
-            l4_info->src_port = bpf_ntohs(udp->source);
-            l4_info->dst_port = bpf_ntohs(udp->dest);
-        }
-    } break;
-    case IPPROTO_SCTP: {
-        struct sctphdr *sctph = l4_hdr_start;
-        if ((void *)sctph + sizeof(*sctph) <= data_end) {
-            l4_info->src_port = bpf_ntohs(sctph->source);
-            l4_info->dst_port = bpf_ntohs(sctph->dest);
-        }
-    } break;
-    case IPPROTO_ICMP: {
-        struct icmphdr *icmph = l4_hdr_start;
-        if ((void *)icmph + sizeof(*icmph) <= data_end) {
-            l4_info->icmp_type = icmph->type;
-            l4_info->icmp_code = icmph->code;
-        }
-    } break;
-    case IPPROTO_ICMPV6: {
-        struct icmp6hdr *icmp6h = l4_hdr_start;
-         if ((void *)icmp6h + sizeof(*icmp6h) <= data_end) {
-            l4_info->icmp_type = icmp6h->icmp6_type;
-            l4_info->icmp_code = icmp6h->icmp6_code;
-        }
-    } break;
-    default:
-        break;
-    }
-}
-
-// sets flow fields from IPv4 header information
-static inline int fill_iphdr(struct iphdr *ip, void *data_end, flow_id *id, u16 *flags) {
-    struct l4_info_t l4_info;
-    void *l4_hdr_start;
-
-    l4_hdr_start = (void *)ip + sizeof(*ip);
-    if (l4_hdr_start > data_end) {
-        return DISCARD;
-    }
-    __builtin_memset(&l4_info, 0, sizeof(l4_info));
-    __builtin_memcpy(id->src_ip, ip4in6, sizeof(ip4in6));
-    __builtin_memcpy(id->dst_ip, ip4in6, sizeof(ip4in6));
-    __builtin_memcpy(id->src_ip + sizeof(ip4in6), &ip->saddr, sizeof(ip->saddr));
-    __builtin_memcpy(id->dst_ip + sizeof(ip4in6), &ip->daddr, sizeof(ip->daddr));
-    id->transport_protocol = ip->protocol;
-    fill_l4info(l4_hdr_start, data_end, ip->protocol, &l4_info);
-    id->src_port = l4_info.src_port;
-    id->dst_port = l4_info.dst_port;
-    id->icmp_type = l4_info.icmp_type;
-    id->icmp_code = l4_info.icmp_code;
-    *flags = l4_info.flags;
-
-    return SUBMIT;
-}
-
-// sets flow fields from IPv6 header information
-static inline int fill_ip6hdr(struct ipv6hdr *ip, void *data_end, flow_id *id, u16 *flags) {
-    struct l4_info_t l4_info;
-    void *l4_hdr_start;
-
-    l4_hdr_start = (void *)ip + sizeof(*ip);
-    if (l4_hdr_start > data_end) {
-        return DISCARD;
-    }
-    __builtin_memset(&l4_info, 0, sizeof(l4_info));
-    __builtin_memcpy(id->src_ip, ip->saddr.in6_u.u6_addr8, 16);
-    __builtin_memcpy(id->dst_ip, ip->daddr.in6_u.u6_addr8, 16);
-    id->transport_protocol = ip->nexthdr;
-    fill_l4info(l4_hdr_start, data_end, ip->nexthdr, &l4_info);
-    id->src_port = l4_info.src_port;
-    id->dst_port = l4_info.dst_port;
-    id->icmp_type = l4_info.icmp_type;
-    id->icmp_code = l4_info.icmp_code;
-    *flags = l4_info.flags;
-
-    return SUBMIT;
-}
-// sets flow fields from Ethernet header information
-static inline int fill_ethhdr(struct ethhdr *eth, void *data_end, flow_id *id, u16 *flags) {
-    if ((void *)eth + sizeof(*eth) > data_end) {
-        return DISCARD;
-    }
-    __builtin_memcpy(id->dst_mac, eth->h_dest, ETH_ALEN);
-    __builtin_memcpy(id->src_mac, eth->h_source, ETH_ALEN);
-    id->eth_protocol = bpf_ntohs(eth->h_proto);
-
-    if (id->eth_protocol == ETH_P_IP) {
-        struct iphdr *ip = (void *)eth + sizeof(*eth);
-        return fill_iphdr(ip, data_end, id, flags);
-    } else if (id->eth_protocol == ETH_P_IPV6) {
-        struct ipv6hdr *ip6 = (void *)eth + sizeof(*eth);
-        return fill_ip6hdr(ip6, data_end, id, flags);
-    } else {
-        // TODO : Need to implement other specific ethertypes if needed
-        // For now other parts of flow id remain zero
-        __builtin_memset(&(id->src_ip), 0, sizeof(struct in6_addr));
-        __builtin_memset(&(id->dst_ip), 0, sizeof(struct in6_addr));
-        id->transport_protocol = 0;
-        id->src_port = 0;
-        id->dst_port = 0;
-    }
-    return SUBMIT;
-}
 
 static inline int flow_monitor(struct __sk_buff *skb, u8 direction) {
     // If sampling is defined, will only parse 1 out of "sampling" flows
@@ -317,4 +101,5 @@ SEC("tc_egress")
 int egress_flow_parse(struct __sk_buff *skb) {
     return flow_monitor(skb, EGRESS);
 }
+
 char _license[] SEC("license") = "GPL";
diff --git a/bpf/maps_definition.h b/bpf/maps_definition.h
new file mode 100644
index 0000000000000000000000000000000000000000..8bd0d01207bdc04977a79aa4351b6b0115d51f25
--- /dev/null
+++ b/bpf/maps_definition.h
@@ -0,0 +1,21 @@
+#ifndef __MAPS_DEFINITION_H__
+#define __MAPS_DEFINITION_H__
+
+#include <vmlinux.h>
+
+// Common Ringbuffer as a conduit for ingress/egress flows to userspace
+struct {
+    __uint(type, BPF_MAP_TYPE_RINGBUF);
+    __uint(max_entries, 1 << 24);
+} direct_flows SEC(".maps");
+
+// Key: the flow identifier. Value: the flow metrics for that identifier.
+struct {
+    __uint(type, BPF_MAP_TYPE_HASH);
+    __type(key, flow_id);
+    __type(value, flow_metrics);
+    __uint(max_entries, 1 << 24);
+    __uint(map_flags, BPF_F_NO_PREALLOC);
+} aggregated_flows SEC(".maps");
+
+#endif //__MAPS_DEFINITION_H__
diff --git a/bpf/tcp_drops.h b/bpf/tcp_drops.h
new file mode 100644
index 0000000000000000000000000000000000000000..bbd84b547ef9e50166a4f15c619ce24659705ba8
--- /dev/null
+++ b/bpf/tcp_drops.h
@@ -0,0 +1,88 @@
+/*
+    TCPDrops using trace points.
+*/
+
+#ifndef __TCP_DROPS_H__
+#define __TCP_DROPS_H__
+
+#include "utils.h"
+
+static inline int trace_tcp_drop(void *ctx, struct sock *sk,
+                                 struct sk_buff *skb,
+                                 enum skb_drop_reason reason) {
+    if (sk == NULL)
+        return 0;
+
+    flow_id id;
+    __builtin_memset(&id, 0, sizeof(id));
+
+    u8 state = 0, protocol = 0;
+    u16 family = 0,flags = 0;
+
+    // pull in details from the packet headers and the sock struct
+    bpf_probe_read(&state, sizeof(u8), (u8 *)&sk->__sk_common.skc_state);
+
+    id.if_index = skb->skb_iif;
+
+    // read L2 info
+    set_key_with_l2_info(skb, &id, &family);
+
+    // read L3 info
+    set_key_with_l3_info(skb, family, &id, &protocol);
+
+    // We only support TCP drops for any other protocol just return w/o doing anything
+    if (protocol != IPPROTO_TCP) {
+        return 0;
+    }
+
+    // read L4 info
+    set_key_with_tcp_info(skb, &id, protocol, &flags);
+
+    long ret = 0;
+    for (direction_t dir = INGRESS; dir < MAX_DIRECTION; dir++) {
+        id.direction = dir;
+        ret = tcp_drop_lookup_and_update_flow(skb, &id, state, flags, reason);
+        if (ret == 0) {
+            return 0;
+        }
+    }
+    // there are no matching flows, so let's create a new one and add the drops
+    u64 current_time = bpf_ktime_get_ns();
+    id.direction = INGRESS;
+    flow_metrics new_flow = {
+        .start_mono_time_ts = current_time,
+        .end_mono_time_ts = current_time,
+        .flags = flags,
+        .tcp_drops.packets = 1,
+        .tcp_drops.bytes = skb->len,
+        .tcp_drops.latest_state = state,
+        .tcp_drops.latest_flags = flags,
+        .tcp_drops.latest_drop_cause = reason,
+    };
+    ret = bpf_map_update_elem(&aggregated_flows, &id, &new_flow, BPF_ANY);
+    if (trace_messages && ret != 0) {
+        bpf_printk("error tcp drop creating new flow %d\n", ret);
+    }
+
+    return ret;
+}
+
+SEC("tracepoint/skb/kfree_skb")
+int kfree_skb(struct trace_event_raw_kfree_skb *args) {
+    struct sk_buff skb;
+    __builtin_memset(&skb, 0, sizeof(skb));
+
+    bpf_probe_read(&skb, sizeof(struct sk_buff), args->skbaddr);
+    struct sock *sk = skb.sk;
+    enum skb_drop_reason reason = args->reason;
+
+    // SKB_NOT_DROPPED_YET,
+    // SKB_CONSUMED,
+    // SKB_DROP_REASON_NOT_SPECIFIED,
+    if (reason > SKB_DROP_REASON_NOT_SPECIFIED) {
+        return trace_tcp_drop(args, sk, &skb, reason);
+    }
+    return 0;
+}
+
+#endif //__TCP_DROPS_H__
diff --git a/bpf/utils.h b/bpf/utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..5338f1b72ae2d0cd8896dcc193933f2be4222128
--- /dev/null
+++ b/bpf/utils.h
@@ -0,0 +1,295 @@
+#ifndef __UTILS_H__
+#define __UTILS_H__
+
+#include <vmlinux.h>
+#include <bpf_helpers.h>
+
+#include "flow.h"
+#include "maps_definition.h"
+#include "configs.h"
+
+#define DISCARD 1
+#define SUBMIT 0
+
+// according to field 61 in https://www.iana.org/assignments/ipfix/ipfix.xhtml
+typedef enum {
+    INGRESS         = 0,
+    EGRESS          = 1,
+    MAX_DIRECTION   = 2,
+} direction_t;
+
+// L4_info structure contains L4 headers parsed information.
+struct l4_info_t {
+    // TCP/UDP/SCTP source port in host byte order
+    u16 src_port;
+    // TCP/UDP/SCTP destination port in host byte order
+    u16 dst_port;
+    // ICMPv4/ICMPv6 type value
+    u8 icmp_type;
+    // ICMPv4/ICMPv6 code value
+    u8 icmp_code;
+    // TCP flags
+    u16 flags;
+};
+
+const u8 ip4in6[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff};
+
+// Flags according to RFC 9293 & https://www.iana.org/assignments/ipfix/ipfix.xhtml
+#define FIN_FLAG 0x01
+#define SYN_FLAG 0x02
+#define RST_FLAG 0x04
+#define PSH_FLAG 0x08
+#define ACK_FLAG 0x10
+#define URG_FLAG 0x20
+#define ECE_FLAG 0x40
+#define CWR_FLAG 0x80
+// Custom flags exported
+#define SYN_ACK_FLAG 0x100
+#define FIN_ACK_FLAG 0x200
+#define RST_ACK_FLAG 0x400
+
+#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
+	__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define bpf_ntohs(x)		__builtin_bswap16(x)
+#define bpf_htons(x)		__builtin_bswap16(x)
+#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+	__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define bpf_ntohs(x)		(x)
+#define bpf_htons(x)		(x)
+#else
+# error "Endianness detection needs to be set up for your compiler?!"
+#endif
+
+
+// sets the TCP header flags for connection information
+static inline void set_flags(struct tcphdr *th, u16 *flags) {
+    //If both ACK and SYN are set, then it is server -> client communication during 3-way handshake.
+    if (th->ack && th->syn) {
+        *flags |= SYN_ACK_FLAG;
+    } else if (th->ack && th->fin ) {
+        // If both ACK and FIN are set, then it is graceful termination from server.
+        *flags |= FIN_ACK_FLAG;
+    } else if (th->ack && th->rst ) {
+        // If both ACK and RST are set, then it is abrupt connection termination.
+        *flags |= RST_ACK_FLAG;
+    } else if (th->fin) {
+        *flags |= FIN_FLAG;
+    } else if (th->syn) {
+        *flags |= SYN_FLAG;
+    } else if (th->ack) {
+        *flags |= ACK_FLAG;
+    } else if (th->rst) {
+        *flags |= RST_FLAG;
+    } else if (th->psh) {
+        *flags |= PSH_FLAG;
+    } else if (th->urg) {
+        *flags |= URG_FLAG;
+    } else if (th->ece) {
+        *flags |= ECE_FLAG;
+    } else if (th->cwr) {
+        *flags |= CWR_FLAG;
+    }
+}
+
+// Extract L4 info for the supported protocols
+static inline void fill_l4info(void *l4_hdr_start, void *data_end, u8 protocol,
+                               struct l4_info_t *l4_info) {
+	switch (protocol) {
+    case IPPROTO_TCP: {
+        struct tcphdr *tcp = l4_hdr_start;
+        if ((void *)tcp + sizeof(*tcp) <= data_end) {
+            l4_info->src_port = bpf_ntohs(tcp->source);
+            l4_info->dst_port = bpf_ntohs(tcp->dest);
+            set_flags(tcp, &l4_info->flags);
+        }
+    } break;
+    case IPPROTO_UDP: {
+        struct udphdr *udp = l4_hdr_start;
+        if ((void *)udp + sizeof(*udp) <= data_end) {
+            l4_info->src_port = bpf_ntohs(udp->source);
+            l4_info->dst_port = bpf_ntohs(udp->dest);
+        }
+    } break;
+    case IPPROTO_SCTP: {
+        struct sctphdr *sctph = l4_hdr_start;
+        if ((void *)sctph + sizeof(*sctph) <= data_end) {
+            l4_info->src_port = bpf_ntohs(sctph->source);
+            l4_info->dst_port = bpf_ntohs(sctph->dest);
+        }
+    } break;
+    case IPPROTO_ICMP: {
+        struct icmphdr *icmph = l4_hdr_start;
+        if ((void *)icmph + sizeof(*icmph) <= data_end) {
+            l4_info->icmp_type = icmph->type;
+            l4_info->icmp_code = icmph->code;
+        }
+    } break;
+    case IPPROTO_ICMPV6: {
+        struct icmp6hdr *icmp6h = l4_hdr_start;
+         if ((void *)icmp6h + sizeof(*icmp6h) <= data_end) {
+            l4_info->icmp_type = icmp6h->icmp6_type;
+            l4_info->icmp_code = icmp6h->icmp6_code;
+        }
+    } break;
+    default:
+        break;
+    }
+}
+
+// sets flow fields from IPv4 header information
+static inline int fill_iphdr(struct iphdr *ip, void *data_end, flow_id *id, u16 *flags) {
+    struct l4_info_t l4_info;
+    void *l4_hdr_start;
+
+    l4_hdr_start = (void *)ip + sizeof(*ip);
+    if (l4_hdr_start > data_end) {
+        return DISCARD;
+    }
+    __builtin_memset(&l4_info, 0, sizeof(l4_info));
+    __builtin_memcpy(id->src_ip, ip4in6, sizeof(ip4in6));
+    __builtin_memcpy(id->dst_ip, ip4in6, sizeof(ip4in6));
+    __builtin_memcpy(id->src_ip + sizeof(ip4in6), &ip->saddr, sizeof(ip->saddr));
+    __builtin_memcpy(id->dst_ip + sizeof(ip4in6), &ip->daddr, sizeof(ip->daddr));
+    id->transport_protocol = ip->protocol;
+    fill_l4info(l4_hdr_start, data_end, ip->protocol, &l4_info);
+    id->src_port = l4_info.src_port;
+    id->dst_port = l4_info.dst_port;
+    id->icmp_type = l4_info.icmp_type;
+    id->icmp_code = l4_info.icmp_code;
+    *flags = l4_info.flags;
+
+    return SUBMIT;
+}
+
+// sets flow fields from IPv6 header information
+static inline int fill_ip6hdr(struct ipv6hdr *ip, void *data_end, flow_id *id, u16 *flags) {
+    struct l4_info_t l4_info;
+    void *l4_hdr_start;
+
+    l4_hdr_start = (void *)ip + sizeof(*ip);
+    if (l4_hdr_start > data_end) {
+        return DISCARD;
+    }
+    __builtin_memset(&l4_info, 0, sizeof(l4_info));
+    __builtin_memcpy(id->src_ip, ip->saddr.in6_u.u6_addr8, IP_MAX_LEN);
+    __builtin_memcpy(id->dst_ip, ip->daddr.in6_u.u6_addr8, IP_MAX_LEN);
+    id->transport_protocol = ip->nexthdr;
+    fill_l4info(l4_hdr_start, data_end, ip->nexthdr, &l4_info);
+    id->src_port = l4_info.src_port;
+    id->dst_port = l4_info.dst_port;
+    id->icmp_type = l4_info.icmp_type;
+    id->icmp_code = l4_info.icmp_code;
+    *flags = l4_info.flags;
+
+    return SUBMIT;
+}
+
+// sets flow fields from Ethernet header information
+static inline int fill_ethhdr(struct ethhdr *eth, void *data_end, flow_id *id, u16 *flags) {
+    if ((void *)eth + sizeof(*eth) > data_end) {
+        return DISCARD;
+    }
+    __builtin_memcpy(id->dst_mac, eth->h_dest, ETH_ALEN);
+    __builtin_memcpy(id->src_mac, eth->h_source, ETH_ALEN);
+    id->eth_protocol = bpf_ntohs(eth->h_proto);
+
+    if (id->eth_protocol == ETH_P_IP) {
+        struct iphdr *ip = (void *)eth + sizeof(*eth);
+        return fill_iphdr(ip, data_end, id, flags);
+    } else if (id->eth_protocol == ETH_P_IPV6) {
+        struct ipv6hdr *ip6 = (void *)eth + sizeof(*eth);
+        return fill_ip6hdr(ip6, data_end, id, flags);
+    } else {
+        // TODO : Need to implement other specific ethertypes if needed
+        // For now other parts of flow id remain zero
+        __builtin_memset(&(id->src_ip), 0, sizeof(struct in6_addr));
+        __builtin_memset(&(id->dst_ip), 0, sizeof(struct in6_addr));
+        id->transport_protocol = 0;
+        id->src_port = 0;
+        id->dst_port = 0;
+    }
+    return SUBMIT;
+}
+
+static inline void set_key_with_l2_info(struct sk_buff *skb, flow_id *id, u16 *family) {
+     struct ethhdr eth;
+     __builtin_memset(&eth, 0, sizeof(eth));
+     bpf_probe_read(&eth, sizeof(eth), (struct ethhdr *)(skb->head + skb->mac_header));
+     id->eth_protocol = bpf_ntohs(eth.h_proto);
+     __builtin_memcpy(id->dst_mac, eth.h_dest, ETH_ALEN);
+     __builtin_memcpy(id->src_mac, eth.h_source, ETH_ALEN);
+    if (id->eth_protocol == ETH_P_IP) {
+        *family = AF_INET;
+    } else if (id->eth_protocol == ETH_P_IPV6) {
+        *family = AF_INET6;
+    }
+ }
+
+static inline void set_key_with_l3_info(struct sk_buff *skb, u16 family, flow_id *id, u8 *protocol) {
+     if (family == AF_INET) {
+         struct iphdr ip;
+         __builtin_memset(&ip, 0, sizeof(ip));
+         bpf_probe_read(&ip, sizeof(ip), (struct iphdr *)(skb->head + skb->network_header));
+         __builtin_memcpy(id->src_ip, ip4in6, sizeof(ip4in6));
+         __builtin_memcpy(id->dst_ip, ip4in6, sizeof(ip4in6));
+         __builtin_memcpy(id->src_ip + sizeof(ip4in6), &ip.saddr, sizeof(ip.saddr));
+         __builtin_memcpy(id->dst_ip + sizeof(ip4in6), &ip.daddr, sizeof(ip.daddr));
+         *protocol = ip.protocol;
+     } else if (family == AF_INET6) {
+         struct ipv6hdr ip;
+         __builtin_memset(&ip, 0, sizeof(ip));
+         bpf_probe_read(&ip, sizeof(ip), (struct ipv6hdr *)(skb->head + skb->network_header));
+         __builtin_memcpy(id->src_ip, ip.saddr.in6_u.u6_addr8, IP_MAX_LEN);
+         __builtin_memcpy(id->dst_ip, ip.daddr.in6_u.u6_addr8, IP_MAX_LEN);
+         *protocol = ip.nexthdr;
+     }
+ }
+
+static inline int set_key_with_tcp_info(struct sk_buff *skb, flow_id *id, u8 protocol, u16 *flags) {
+     u16 sport = 0,dport = 0;
+     struct tcphdr tcp;
+
+     __builtin_memset(&tcp, 0, sizeof(tcp));
+     bpf_probe_read(&tcp, sizeof(tcp), (struct tcphdr *)(skb->head + skb->transport_header));
+     sport = bpf_ntohs(tcp.source);
+     dport = bpf_ntohs(tcp.dest);
+     set_flags(&tcp, flags);
+     id->src_port = sport;
+     id->dst_port = dport;
+     id->transport_protocol = protocol;
+     return tcp.doff * sizeof(u32);
+ }
+
+static inline int set_key_with_udp_info(struct sk_buff *skb, flow_id *id, u8 protocol) {
+     u16 sport = 0,dport = 0;
+     struct udphdr udp;
+
+     __builtin_memset(&udp, 0, sizeof(udp));
+     bpf_probe_read(&udp, sizeof(udp), (struct udphdr *)(skb->head + skb->transport_header));
+     sport = bpf_ntohs(udp.source);
+     dport = bpf_ntohs(udp.dest);
+     id->src_port = sport;
+     id->dst_port = dport;
+     id->transport_protocol = protocol;
+     return bpf_ntohs(udp.len);
+ }
+
+static inline long tcp_drop_lookup_and_update_flow(struct sk_buff *skb, flow_id *id, u8 state, u16 flags,
+                                            enum skb_drop_reason reason) {
+     flow_metrics *aggregate_flow = bpf_map_lookup_elem(&aggregated_flows, id);
+     if (aggregate_flow != NULL) {
+         aggregate_flow->tcp_drops.packets += 1;
+         aggregate_flow->tcp_drops.bytes += skb->len;
+         aggregate_flow->tcp_drops.latest_state = state;
+         aggregate_flow->tcp_drops.latest_flags = flags;
+         aggregate_flow->tcp_drops.latest_drop_cause = reason;
+         long ret = bpf_map_update_elem(&aggregated_flows, id, aggregate_flow, BPF_ANY);
+         if (trace_messages && ret != 0) {
+             bpf_printk("error tcp drop updating flow %d\n", ret);
+         }
+         return 0;
+      }
+      return -1;
+ }
+
+#endif // __UTILS_H__
diff --git a/e2e/cluster/base/04-agent.yml b/e2e/cluster/base/04-agent.yml
index 19a0ff5213b2896305676d6cc25c3980a48a2707..7771b9f5d591374d597ebf75fb4fdaeb78a81576 100644
--- a/e2e/cluster/base/04-agent.yml
+++ b/e2e/cluster/base/04-agent.yml
@@ -32,3 +32,12 @@ spec:
                 fieldPath: status.hostIP
           - name: FLOWS_TARGET_PORT
             value: "9999"
+        volumeMounts:
+            - name: bpf-kernel-debug
+              mountPath: /sys/kernel/debug
+              mountPropagation: Bidirectional
+      volumes:
+        - name: bpf-kernel-debug
+          hostPath:
+            path: /sys/kernel/debug
+            type: Directory
diff --git a/e2e/ipfix/manifests/30-agent.yml b/e2e/ipfix/manifests/30-agent.yml
index d6e50d848491012f5a6c430ac0a75b4f05415c05..8a0b09fc3c9e24692ed2ac42fb5c9f0374af3e3c 100644
--- a/e2e/ipfix/manifests/30-agent.yml
+++ b/e2e/ipfix/manifests/30-agent.yml
@@ -34,3 +34,12 @@ spec:
                 fieldPath: status.hostIP
           - name: FLOWS_TARGET_PORT
             value: "9999"
+        volumeMounts:
+            - name: bpf-kernel-debug
+              mountPath: /sys/kernel/debug
+              mountPropagation: Bidirectional
+      volumes:
+        - name: bpf-kernel-debug
+          hostPath:
+            path: /sys/kernel/debug
+            type: Directory
diff --git a/e2e/kafka/manifests/30-agent.yml b/e2e/kafka/manifests/30-agent.yml
index e8ac4083561cc3fe7457376eaa83bb95202ad0bf..6602d857421c98a1ed657e7f576661ca45cf7e07 100644
--- a/e2e/kafka/manifests/30-agent.yml
+++ b/e2e/kafka/manifests/30-agent.yml
@@ -30,3 +30,12 @@ spec:
             value: 200ms
           - name: LOG_LEVEL
             value: debug
+        volumeMounts:
+            - name: bpf-kernel-debug
+              mountPath: /sys/kernel/debug
+              mountPropagation: Bidirectional
+      volumes:
+        - name: bpf-kernel-debug
+          hostPath:
+            path: /sys/kernel/debug
+            type: Directory
diff --git a/examples/flowlogs-dump/server/flowlogs-dump-collector.go b/examples/flowlogs-dump/server/flowlogs-dump-collector.go
index 2728e0b3b1f07e184cffd4fb0eb99967377a364f..d04af1c022a14067da099dffeef60c656c9a6449 100644
--- a/examples/flowlogs-dump/server/flowlogs-dump-collector.go
+++ b/examples/flowlogs-dump/server/flowlogs-dump-collector.go
@@ -72,7 +72,7 @@ func main() {
 	for records := range receivedRecords {
 		for _, record := range records.Entries {
 			if record.EthProtocol == ipv6 {
-				log.Printf("%s: %v %s IP %s:%d > %s:%d: protocol:%s type: %d code: %d dir:%d bytes:%d packets:%d flags:%d ends: %v\n",
+				log.Printf("%s: %v %s IP %s:%d > %s:%d: protocol:%s type: %d code: %d dir:%d bytes:%d packets:%d flags:%d ends: %v dnsId: %d dnsFlags: 0x%04x dnsReq: %v dnsRsp: %v\n",
 					ipProto[record.EthProtocol],
 					record.TimeFlowStart.AsTime().Local().Format("15:04:05.000000"),
 					record.Interface,
@@ -81,16 +81,20 @@ func main() {
 					net.IP(record.Network.GetDstAddr().GetIpv6()).To16(),
 					record.Transport.DstPort,
 					protocolByNumber[record.Transport.Protocol],
-					record.Icmp.IcmpType,
-					record.Icmp.IcmpCode,
+					record.IcmpType,
+					record.IcmpCode,
 					record.Direction,
 					record.Bytes,
 					record.Packets,
 					record.Flags,
 					record.TimeFlowEnd.AsTime().Local().Format("15:04:05.000000"),
+					record.GetDnsId(),
+					record.GetDnsFlags(),
+					record.GetTimeDnsReq(),
+					record.GetTimeDnsRsp(),
 				)
 			} else {
-				log.Printf("%s: %v %s IP %s:%d > %s:%d: protocol:%s type: %d code: %d dir:%d bytes:%d packets:%d flags:%d ends: %v\n",
+				log.Printf("%s: %v %s IP %s:%d > %s:%d: protocol:%s type: %d code: %d dir:%d bytes:%d packets:%d flags:%d ends: %v dnsId: %d dnsFlags: 0x%04x dnsReq: %v dnsRsp: %v\n",
 					ipProto[record.EthProtocol],
 					record.TimeFlowStart.AsTime().Local().Format("15:04:05.000000"),
 					record.Interface,
@@ -99,13 +103,17 @@ func main() {
 					ipIntToNetIP(record.Network.GetDstAddr().GetIpv4()).String(),
 					record.Transport.DstPort,
 					protocolByNumber[record.Transport.Protocol],
-					record.Icmp.IcmpType,
-					record.Icmp.IcmpCode,
+					record.IcmpType,
+					record.IcmpCode,
 					record.Direction,
 					record.Bytes,
 					record.Packets,
 					record.Flags,
 					record.TimeFlowEnd.AsTime().Local().Format("15:04:05.000000"),
+					record.GetDnsId(),
+					record.GetDnsFlags(),
+					record.GetTimeDnsReq(),
+					record.GetTimeDnsRsp(),
 				)
 			}
 		}
diff --git a/pkg/agent/agent.go b/pkg/agent/agent.go
index efd44c82115d42b4614cc0c108ccd25c2f5c8240..2553feef640a830a3fa135160a05140a92327e25 100644
--- a/pkg/agent/agent.go
+++ b/pkg/agent/agent.go
@@ -122,7 +122,7 @@ func FlowsAgent(cfg *Config) (*Flows, error) {
 		debug = true
 	}
 
-	fetcher, err := ebpf.NewFlowFetcher(debug, cfg.Sampling, cfg.CacheMaxFlows, ingress, egress)
+	fetcher, err := ebpf.NewFlowFetcher(debug, cfg.Sampling, cfg.CacheMaxFlows, ingress, egress, cfg.EnableTCPDrops, cfg.EnableDNSTracking)
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/agent/config.go b/pkg/agent/config.go
index 6dfb49b2dc766e60b7e7b0b3643a57703b8d63c0..7cc1462954a66af09df047574fa959bac3eb7e3b 100644
--- a/pkg/agent/config.go
+++ b/pkg/agent/config.go
@@ -138,4 +138,8 @@ type Config struct {
 	ProfilePort int `env:"PROFILE_PORT"`
 	// EnableGC enables golang garbage collection run at the end of every map eviction, default is true
 	EnableGC bool `env:"ENABLE_GARBAGE_COLLECTION" envDefault:"true"`
+	// EnableTCPDrops enables the TCP drops eBPF hook to account for TCP dropped flows
+	EnableTCPDrops bool `env:"ENABLE_TCP_DROPS" envDefault:"false"`
+	// EnableDNSTracking enables the DNS tracking eBPF hook to track DNS query/response flows
+	EnableDNSTracking bool `env:"ENABLE_DNS_TRACKING" envDefault:"false"`
 }
diff --git a/pkg/ebpf/bpf_bpfeb.go b/pkg/ebpf/bpf_bpfeb.go
index 575ad06813f56522cabcd31def7744b9c15776e0..c1037c2d1e62eef6b72db0810818e1d0f0787a10 100644
--- a/pkg/ebpf/bpf_bpfeb.go
+++ b/pkg/ebpf/bpf_bpfeb.go
@@ -13,6 +13,13 @@ import (
 	"github.com/cilium/ebpf"
 )
 
+type BpfDnsRecordT struct {
+	Id            uint16
+	Flags         uint16
+	ReqMonoTimeTs uint64
+	RspMonoTimeTs uint64
+}
+
 type BpfFlowId BpfFlowIdT
 
 type BpfFlowIdT struct {
@@ -39,6 +46,8 @@ type BpfFlowMetricsT struct {
 	EndMonoTimeTs   uint64
 	Flags           uint16
 	Errno           uint8
+	TcpDrops        BpfTcpDropsT
+	DnsRecord       BpfDnsRecordT
 }
 
 type BpfFlowRecordT struct {
@@ -46,6 +55,14 @@ type BpfFlowRecordT struct {
 	Metrics BpfFlowMetrics
 }
 
+type BpfTcpDropsT struct {
+	Packets         uint32
+	Bytes           uint64
+	LatestFlags     uint16
+	LatestState     uint8
+	LatestDropCause uint32
+}
+
 // LoadBpf returns the embedded CollectionSpec for Bpf.
 func LoadBpf() (*ebpf.CollectionSpec, error) {
 	reader := bytes.NewReader(_BpfBytes)
@@ -89,6 +106,8 @@ type BpfSpecs struct {
 type BpfProgramSpecs struct {
 	EgressFlowParse  *ebpf.ProgramSpec `ebpf:"egress_flow_parse"`
 	IngressFlowParse *ebpf.ProgramSpec `ebpf:"ingress_flow_parse"`
+	KfreeSkb         *ebpf.ProgramSpec `ebpf:"kfree_skb"`
+	TraceNetPackets  *ebpf.ProgramSpec `ebpf:"trace_net_packets"`
 }
 
 // BpfMapSpecs contains maps before they are loaded into the kernel.
@@ -135,12 +154,16 @@ func (m *BpfMaps) Close() error {
 type BpfPrograms struct {
 	EgressFlowParse  *ebpf.Program `ebpf:"egress_flow_parse"`
 	IngressFlowParse *ebpf.Program `ebpf:"ingress_flow_parse"`
+	KfreeSkb         *ebpf.Program `ebpf:"kfree_skb"`
+	TraceNetPackets  *ebpf.Program `ebpf:"trace_net_packets"`
 }
 
 func (p *BpfPrograms) Close() error {
 	return _BpfClose(
 		p.EgressFlowParse,
 		p.IngressFlowParse,
+		p.KfreeSkb,
+		p.TraceNetPackets,
 	)
 }
 
diff --git a/pkg/ebpf/bpf_bpfeb.o b/pkg/ebpf/bpf_bpfeb.o
index f3dd6b1ea09dea6751857a74838cac9ded0401a6..cdaf6f13945bac619aaad7889be15ae739665777 100644
Binary files a/pkg/ebpf/bpf_bpfeb.o and b/pkg/ebpf/bpf_bpfeb.o differ
diff --git a/pkg/ebpf/bpf_bpfel.go b/pkg/ebpf/bpf_bpfel.go
index bae6823f06fc6d62e2f0158244607047254ea7e0..4aa8bfd8ce821b31be21e859b498dacc3541e2f7 100644
--- a/pkg/ebpf/bpf_bpfel.go
+++ b/pkg/ebpf/bpf_bpfel.go
@@ -13,6 +13,13 @@ import (
 	"github.com/cilium/ebpf"
 )
 
+type BpfDnsRecordT struct {
+	Id            uint16
+	Flags         uint16
+	ReqMonoTimeTs uint64
+	RspMonoTimeTs uint64
+}
+
 type BpfFlowId BpfFlowIdT
 
 type BpfFlowIdT struct {
@@ -39,6 +46,8 @@ type BpfFlowMetricsT struct {
 	EndMonoTimeTs   uint64
 	Flags           uint16
 	Errno           uint8
+	TcpDrops        BpfTcpDropsT
+	DnsRecord       BpfDnsRecordT
 }
 
 type BpfFlowRecordT struct {
@@ -46,6 +55,14 @@ type BpfFlowRecordT struct {
 	Metrics BpfFlowMetrics
 }
 
+type BpfTcpDropsT struct {
+	Packets         uint32
+	Bytes           uint64
+	LatestFlags     uint16
+	LatestState     uint8
+	LatestDropCause uint32
+}
+
 // LoadBpf returns the embedded CollectionSpec for Bpf.
 func LoadBpf() (*ebpf.CollectionSpec, error) {
 	reader := bytes.NewReader(_BpfBytes)
@@ -89,6 +106,8 @@ type BpfSpecs struct {
 type BpfProgramSpecs struct {
 	EgressFlowParse  *ebpf.ProgramSpec `ebpf:"egress_flow_parse"`
 	IngressFlowParse *ebpf.ProgramSpec `ebpf:"ingress_flow_parse"`
+	KfreeSkb         *ebpf.ProgramSpec `ebpf:"kfree_skb"`
+	TraceNetPackets  *ebpf.ProgramSpec `ebpf:"trace_net_packets"`
 }
 
 // BpfMapSpecs contains maps before they are loaded into the kernel.
@@ -135,12 +154,16 @@ func (m *BpfMaps) Close() error {
 type BpfPrograms struct {
 	EgressFlowParse  *ebpf.Program `ebpf:"egress_flow_parse"`
 	IngressFlowParse *ebpf.Program `ebpf:"ingress_flow_parse"`
+	KfreeSkb         *ebpf.Program `ebpf:"kfree_skb"`
+	TraceNetPackets  *ebpf.Program `ebpf:"trace_net_packets"`
 }
 
 func (p *BpfPrograms) Close() error {
 	return _BpfClose(
 		p.EgressFlowParse,
 		p.IngressFlowParse,
+		p.KfreeSkb,
+		p.TraceNetPackets,
 	)
 }
 
diff --git a/pkg/ebpf/bpf_bpfel.o b/pkg/ebpf/bpf_bpfel.o
index b6095610414e142b585363b87c85ed31fa4fe8f5..df2199eef85efc66c71fe22bac3642b760bfa572 100644
Binary files a/pkg/ebpf/bpf_bpfel.o and b/pkg/ebpf/bpf_bpfel.o differ
diff --git a/pkg/ebpf/tracer.go b/pkg/ebpf/tracer.go
index 2253c1f4b8abec1044437fcb9acfa478840e8054..29eba8bf1e42da903df43a4ddbf6376c2a20e73d 100644
--- a/pkg/ebpf/tracer.go
+++ b/pkg/ebpf/tracer.go
@@ -8,6 +8,7 @@ import (
 
 	"github.com/cilium/ebpf"
 	"github.com/cilium/ebpf/btf"
+	"github.com/cilium/ebpf/link"
 	"github.com/cilium/ebpf/ringbuf"
 	"github.com/cilium/ebpf/rlimit"
 	"github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces"
@@ -17,7 +18,7 @@ import (
 )
 
 // $BPF_CLANG and $BPF_CFLAGS are set by the Makefile.
-//go:generate bpf2go -cc $BPF_CLANG -cflags $BPF_CFLAGS -type flow_metrics_t -type flow_id_t -type flow_record_t Bpf ../../bpf/flows.c -- -I../../bpf/headers
+//go:generate bpf2go -cc $BPF_CLANG -cflags $BPF_CFLAGS -type flow_metrics_t -type flow_id_t -type flow_record_t -type tcp_drops_t -type dns_record_t Bpf ../../bpf/flows.c -- -I../../bpf/headers
 
 const (
 	qdiscType = "clsact"
@@ -34,20 +35,22 @@ var log = logrus.WithField("component", "ebpf.FlowFetcher")
 // and to flows that are forwarded by the kernel via ringbuffer because could not be aggregated
 // in the map
 type FlowFetcher struct {
-	objects        *BpfObjects
-	qdiscs         map[ifaces.Interface]*netlink.GenericQdisc
-	egressFilters  map[ifaces.Interface]*netlink.BpfFilter
-	ingressFilters map[ifaces.Interface]*netlink.BpfFilter
-	ringbufReader  *ringbuf.Reader
-	cacheMaxSize   int
-	enableIngress  bool
-	enableEgress   bool
+	objects              *BpfObjects
+	qdiscs               map[ifaces.Interface]*netlink.GenericQdisc
+	egressFilters        map[ifaces.Interface]*netlink.BpfFilter
+	ingressFilters       map[ifaces.Interface]*netlink.BpfFilter
+	ringbufReader        *ringbuf.Reader
+	cacheMaxSize         int
+	enableIngress        bool
+	enableEgress         bool
+	tcpDropsTracePoint   link.Link
+	dnsTrackerTracePoint link.Link
 }
 
 func NewFlowFetcher(
 	traceMessages bool,
 	sampling, cacheMaxSize int,
-	ingress, egress bool,
+	ingress, egress, tcpDrops, dnsTracker bool,
 ) (*FlowFetcher, error) {
 	if err := rlimit.RemoveMemlock(); err != nil {
 		log.WithError(err).
@@ -73,6 +76,7 @@ func NewFlowFetcher(
 	}); err != nil {
 		return nil, fmt.Errorf("rewriting BPF constants definition: %w", err)
 	}
+
 	if err := spec.LoadAndAssign(&objects, nil); err != nil {
 		var ve *ebpf.VerifierError
 		if errors.As(err, &ve) {
@@ -88,20 +92,39 @@ func NewFlowFetcher(
 	 * for more details.
 	 */
 	btf.FlushKernelSpec()
+
+	var tcpDropsLink link.Link
+	if tcpDrops {
+		tcpDropsLink, err = link.Tracepoint("skb", "kfree_skb", objects.KfreeSkb, nil)
+		if err != nil {
+			return nil, fmt.Errorf("failed to attach the BPF program to kfree_skb tracepoint: %w", err)
+		}
+	}
+
+	var dnsTrackerLink link.Link
+	if dnsTracker {
+		dnsTrackerLink, err = link.Tracepoint("net", "net_dev_queue", objects.TraceNetPackets, nil)
+		if err != nil {
+			return nil, fmt.Errorf("failed to attach the BPF program to trace_net_packets: %w", err)
+		}
+	}
+
 	// read events from igress+egress ringbuffer
 	flows, err := ringbuf.NewReader(objects.DirectFlows)
 	if err != nil {
 		return nil, fmt.Errorf("accessing to ringbuffer: %w", err)
 	}
 	return &FlowFetcher{
-		objects:        &objects,
-		ringbufReader:  flows,
-		egressFilters:  map[ifaces.Interface]*netlink.BpfFilter{},
-		ingressFilters: map[ifaces.Interface]*netlink.BpfFilter{},
-		qdiscs:         map[ifaces.Interface]*netlink.GenericQdisc{},
-		cacheMaxSize:   cacheMaxSize,
-		enableIngress:  ingress,
-		enableEgress:   egress,
+		objects:              &objects,
+		ringbufReader:        flows,
+		egressFilters:        map[ifaces.Interface]*netlink.BpfFilter{},
+		ingressFilters:       map[ifaces.Interface]*netlink.BpfFilter{},
+		qdiscs:               map[ifaces.Interface]*netlink.GenericQdisc{},
+		cacheMaxSize:         cacheMaxSize,
+		enableIngress:        ingress,
+		enableEgress:         egress,
+		tcpDropsTracePoint:   tcpDropsLink,
+		dnsTrackerTracePoint: dnsTrackerLink,
 	}, nil
 }
 
@@ -217,6 +240,14 @@ func (m *FlowFetcher) Close() error {
 	log.Debug("unregistering eBPF objects")
 
 	var errs []error
+
+	if m.tcpDropsTracePoint != nil {
+		m.tcpDropsTracePoint.Close()
+	}
+
+	if m.dnsTrackerTracePoint != nil {
+		m.dnsTrackerTracePoint.Close()
+	}
 	// m.ringbufReader.Read is a blocking operation, so we need to close the ring buffer
 	// from another goroutine to avoid the system not being able to exit if there
 	// isn't traffic in a given interface
diff --git a/pkg/exporter/kafka_proto_test.go b/pkg/exporter/kafka_proto_test.go
index d07028c80cee93155d5f8215ac81bf6457378064..9bdb0b2b81fa97675ccf16254283c9f82783804f 100644
--- a/pkg/exporter/kafka_proto_test.go
+++ b/pkg/exporter/kafka_proto_test.go
@@ -65,7 +65,7 @@ func TestProtoConversion(t *testing.T) {
 	assert.EqualValues(t, 4321, r.Transport.SrcPort)
 	assert.EqualValues(t, 1234, r.Transport.DstPort)
 	assert.EqualValues(t, 210, r.Transport.Protocol)
-	assert.EqualValues(t, 8, r.Icmp.IcmpType)
+	assert.EqualValues(t, 8, r.IcmpType)
 	assert.Equal(t, record.TimeFlowStart.UnixMilli(), r.TimeFlowStart.AsTime().UnixMilli())
 	assert.Equal(t, record.TimeFlowEnd.UnixMilli(), r.TimeFlowEnd.AsTime().UnixMilli())
 	assert.EqualValues(t, 789, r.Bytes)
diff --git a/pkg/exporter/proto.go b/pkg/exporter/proto.go
index 94f9bcea02dfcab9f89ab7ffa927e571c63eaab5..df460b1ba615327ec5fc2220158b590528d22af9 100644
--- a/pkg/exporter/proto.go
+++ b/pkg/exporter/proto.go
@@ -38,7 +38,7 @@ func flowToPB(record *flow.Record) *pbflow.Record {
 }
 
 func v4FlowToPB(fr *flow.Record) *pbflow.Record {
-	return &pbflow.Record{
+	var pbflowRecord = pbflow.Record{
 		EthProtocol: uint32(fr.Id.EthProtocol),
 		Direction:   pbflow.Direction(fr.Id.Direction),
 		DataLink: &pbflow.DataLink{
@@ -54,11 +54,9 @@ func v4FlowToPB(fr *flow.Record) *pbflow.Record {
 			SrcPort:  uint32(fr.Id.SrcPort),
 			DstPort:  uint32(fr.Id.DstPort),
 		},
-		Icmp: &pbflow.Icmp{
-			IcmpType: uint32(fr.Id.IcmpType),
-			IcmpCode: uint32(fr.Id.IcmpCode),
-		},
-		Bytes: fr.Metrics.Bytes,
+		IcmpType: uint32(fr.Id.IcmpType),
+		IcmpCode: uint32(fr.Id.IcmpCode),
+		Bytes:    fr.Metrics.Bytes,
 		TimeFlowStart: &timestamppb.Timestamp{
 			Seconds: fr.TimeFlowStart.Unix(),
 			Nanos:   int32(fr.TimeFlowStart.Nanosecond()),
@@ -67,16 +65,36 @@ func v4FlowToPB(fr *flow.Record) *pbflow.Record {
 			Seconds: fr.TimeFlowEnd.Unix(),
 			Nanos:   int32(fr.TimeFlowEnd.Nanosecond()),
 		},
-		Packets:   uint64(fr.Metrics.Packets),
-		Duplicate: fr.Duplicate,
-		AgentIp:   agentIP(fr.AgentIP),
-		Flags:     uint32(fr.Metrics.Flags),
-		Interface: string(fr.Interface),
+		Packets:                uint64(fr.Metrics.Packets),
+		Duplicate:              fr.Duplicate,
+		AgentIp:                agentIP(fr.AgentIP),
+		Flags:                  uint32(fr.Metrics.Flags),
+		Interface:              string(fr.Interface),
+		TcpDropBytes:           fr.Metrics.TcpDrops.Bytes,
+		TcpDropPackets:         uint64(fr.Metrics.TcpDrops.Packets),
+		TcpDropLatestFlags:     uint32(fr.Metrics.TcpDrops.LatestFlags),
+		TcpDropLatestState:     uint32(fr.Metrics.TcpDrops.LatestState),
+		TcpDropLatestDropCause: fr.Metrics.TcpDrops.LatestDropCause,
+		DnsId:                  uint32(fr.Metrics.DnsRecord.Id),
+		DnsFlags:               uint32(fr.Metrics.DnsRecord.Flags),
+	}
+	if fr.Metrics.DnsRecord.ReqMonoTimeTs != 0 {
+		pbflowRecord.TimeDnsReq = &timestamppb.Timestamp{
+			Seconds: fr.TimeDNSRequest.Unix(),
+			Nanos:   int32(fr.TimeDNSRequest.Nanosecond()),
+		}
+	}
+	if fr.Metrics.DnsRecord.RspMonoTimeTs != 0 {
+		pbflowRecord.TimeDnsRsp = &timestamppb.Timestamp{
+			Seconds: fr.TimeDNSResponse.Unix(),
+			Nanos:   int32(fr.TimeDNSResponse.Nanosecond()),
+		}
 	}
+	return &pbflowRecord
 }
 
 func v6FlowToPB(fr *flow.Record) *pbflow.Record {
-	return &pbflow.Record{
+	var pbflowRecord = pbflow.Record{
 		EthProtocol: uint32(fr.Id.EthProtocol),
 		Direction:   pbflow.Direction(fr.Id.Direction),
 		DataLink: &pbflow.DataLink{
@@ -92,11 +110,9 @@ func v6FlowToPB(fr *flow.Record) *pbflow.Record {
 			SrcPort:  uint32(fr.Id.SrcPort),
 			DstPort:  uint32(fr.Id.DstPort),
 		},
-		Icmp: &pbflow.Icmp{
-			IcmpType: uint32(fr.Id.IcmpType),
-			IcmpCode: uint32(fr.Id.IcmpCode),
-		},
-		Bytes: fr.Metrics.Bytes,
+		IcmpType: uint32(fr.Id.IcmpType),
+		IcmpCode: uint32(fr.Id.IcmpCode),
+		Bytes:    fr.Metrics.Bytes,
 		TimeFlowStart: &timestamppb.Timestamp{
 			Seconds: fr.TimeFlowStart.Unix(),
 			Nanos:   int32(fr.TimeFlowStart.Nanosecond()),
@@ -105,12 +121,32 @@ func v6FlowToPB(fr *flow.Record) *pbflow.Record {
 			Seconds: fr.TimeFlowEnd.Unix(),
 			Nanos:   int32(fr.TimeFlowEnd.Nanosecond()),
 		},
-		Packets:   uint64(fr.Metrics.Packets),
-		Flags:     uint32(fr.Metrics.Flags),
-		Interface: fr.Interface,
-		Duplicate: fr.Duplicate,
-		AgentIp:   agentIP(fr.AgentIP),
+		Packets:                uint64(fr.Metrics.Packets),
+		Flags:                  uint32(fr.Metrics.Flags),
+		Interface:              fr.Interface,
+		Duplicate:              fr.Duplicate,
+		AgentIp:                agentIP(fr.AgentIP),
+		TcpDropBytes:           fr.Metrics.TcpDrops.Bytes,
+		TcpDropPackets:         uint64(fr.Metrics.TcpDrops.Packets),
+		TcpDropLatestFlags:     uint32(fr.Metrics.TcpDrops.LatestFlags),
+		TcpDropLatestState:     uint32(fr.Metrics.TcpDrops.LatestState),
+		TcpDropLatestDropCause: fr.Metrics.TcpDrops.LatestDropCause,
+		DnsId:                  uint32(fr.Metrics.DnsRecord.Id),
+		DnsFlags:               uint32(fr.Metrics.DnsRecord.Flags),
+	}
+	if fr.Metrics.DnsRecord.ReqMonoTimeTs != 0 {
+		pbflowRecord.TimeDnsReq = &timestamppb.Timestamp{
+			Seconds: fr.TimeDNSRequest.Unix(),
+			Nanos:   int32(fr.TimeDNSRequest.Nanosecond()),
+		}
+	}
+	if fr.Metrics.DnsRecord.RspMonoTimeTs != 0 {
+		pbflowRecord.TimeDnsRsp = &timestamppb.Timestamp{
+			Seconds: fr.TimeDNSResponse.Unix(),
+			Nanos:   int32(fr.TimeDNSResponse.Nanosecond()),
+		}
 	}
+	return &pbflowRecord
 }
 
 // Mac bytes are encoded in the same order as in the array. This is, a Mac
diff --git a/pkg/flow/account.go b/pkg/flow/account.go
index a38b8140a4787c488ef0fecf979a3b789ee365f5..b88840e04523abe8db2310b94aab6ceaa6caf149 100644
--- a/pkg/flow/account.go
+++ b/pkg/flow/account.go
@@ -87,7 +87,7 @@ func (c *Accounter) evict(entries map[ebpf.BpfFlowId]*ebpf.BpfFlowMetrics, evict
 	monotonicNow := uint64(c.monoClock())
 	records := make([]*Record, 0, len(entries))
 	for key, metrics := range entries {
-		records = append(records, NewRecord(key, *metrics, now, monotonicNow))
+		records = append(records, NewRecord(key, metrics, now, monotonicNow))
 	}
 	alog.WithField("numEntries", len(records)).Debug("records evicted from userspace accounter")
 	evictor <- records
diff --git a/pkg/flow/account_test.go b/pkg/flow/account_test.go
index c53df299c17401c296ccc2895a48913abd8b1206..211348fa78ad7ec8646e9fa47ed3a211b5b45b4f 100644
--- a/pkg/flow/account_test.go
+++ b/pkg/flow/account_test.go
@@ -64,18 +64,30 @@ func TestEvict_MaxEntries(t *testing.T) {
 		Id: k1,
 		Metrics: ebpf.BpfFlowMetrics{
 			Bytes: 123, Packets: 1, StartMonoTimeTs: 123, EndMonoTimeTs: 123, Flags: 1,
+			DnsRecord: ebpf.BpfDnsRecordT{
+				ReqMonoTimeTs: 123,
+				RspMonoTimeTs: 0,
+			},
 		},
 	}
 	inputs <- &RawRecord{
 		Id: k2,
 		Metrics: ebpf.BpfFlowMetrics{
 			Bytes: 456, Packets: 1, StartMonoTimeTs: 456, EndMonoTimeTs: 456, Flags: 1,
+			DnsRecord: ebpf.BpfDnsRecordT{
+				ReqMonoTimeTs: 456,
+				RspMonoTimeTs: 0,
+			},
 		},
 	}
 	inputs <- &RawRecord{
 		Id: k1,
 		Metrics: ebpf.BpfFlowMetrics{
 			Bytes: 321, Packets: 1, StartMonoTimeTs: 789, EndMonoTimeTs: 789, Flags: 1,
+			DnsRecord: ebpf.BpfDnsRecordT{
+				ReqMonoTimeTs: 789,
+				RspMonoTimeTs: 789,
+			},
 		},
 	}
 	requireNoEviction(t, evictor)
@@ -85,6 +97,10 @@ func TestEvict_MaxEntries(t *testing.T) {
 		Id: k3,
 		Metrics: ebpf.BpfFlowMetrics{
 			Bytes: 111, Packets: 1, StartMonoTimeTs: 888, EndMonoTimeTs: 888, Flags: 1,
+			DnsRecord: ebpf.BpfDnsRecordT{
+				ReqMonoTimeTs: 888,
+				RspMonoTimeTs: 888,
+			},
 		},
 	}
 
@@ -105,20 +121,30 @@ func TestEvict_MaxEntries(t *testing.T) {
 				Id: k1,
 				Metrics: ebpf.BpfFlowMetrics{
 					Bytes: 123, Packets: 1, StartMonoTimeTs: 123, EndMonoTimeTs: 123, Flags: 1,
+					DnsRecord: ebpf.BpfDnsRecordT{
+						ReqMonoTimeTs: 123,
+						RspMonoTimeTs: 0,
+					},
 				},
 			},
-			TimeFlowStart: now.Add(-(1000 - 123) * time.Nanosecond),
-			TimeFlowEnd:   now.Add(-(1000 - 123) * time.Nanosecond),
+			TimeFlowStart:  now.Add(-(1000 - 123) * time.Nanosecond),
+			TimeFlowEnd:    now.Add(-(1000 - 123) * time.Nanosecond),
+			TimeDNSRequest: now.Add(-(1000 - 123) * time.Nanosecond),
 		},
 		k2: {
 			RawRecord: RawRecord{
 				Id: k2,
 				Metrics: ebpf.BpfFlowMetrics{
 					Bytes: 456, Packets: 1, StartMonoTimeTs: 456, EndMonoTimeTs: 456, Flags: 1,
+					DnsRecord: ebpf.BpfDnsRecordT{
+						ReqMonoTimeTs: 456,
+						RspMonoTimeTs: 0,
+					},
 				},
 			},
-			TimeFlowStart: now.Add(-(1000 - 456) * time.Nanosecond),
-			TimeFlowEnd:   now.Add(-(1000 - 456) * time.Nanosecond),
+			TimeFlowStart:  now.Add(-(1000 - 456) * time.Nanosecond),
+			TimeFlowEnd:    now.Add(-(1000 - 456) * time.Nanosecond),
+			TimeDNSRequest: now.Add(-(1000 - 456) * time.Nanosecond),
 		},
 	}, received)
 }
@@ -141,18 +167,30 @@ func TestEvict_Period(t *testing.T) {
 		Id: k1,
 		Metrics: ebpf.BpfFlowMetrics{
 			Bytes: 10, Packets: 1, StartMonoTimeTs: 123, EndMonoTimeTs: 123, Flags: 1,
+			DnsRecord: ebpf.BpfDnsRecordT{
+				ReqMonoTimeTs: 123,
+				RspMonoTimeTs: 0,
+			},
 		},
 	}
 	inputs <- &RawRecord{
 		Id: k1,
 		Metrics: ebpf.BpfFlowMetrics{
 			Bytes: 10, Packets: 1, StartMonoTimeTs: 456, EndMonoTimeTs: 456, Flags: 1,
+			DnsRecord: ebpf.BpfDnsRecordT{
+				ReqMonoTimeTs: 456,
+				RspMonoTimeTs: 0,
+			},
 		},
 	}
 	inputs <- &RawRecord{
 		Id: k1,
 		Metrics: ebpf.BpfFlowMetrics{
 			Bytes: 10, Packets: 1, StartMonoTimeTs: 789, EndMonoTimeTs: 789, Flags: 1,
+			DnsRecord: ebpf.BpfDnsRecordT{
+				ReqMonoTimeTs: 789,
+				RspMonoTimeTs: 0,
+			},
 		},
 	}
 	// Forcing at least one eviction here
@@ -161,12 +199,20 @@ func TestEvict_Period(t *testing.T) {
 		Id: k1,
 		Metrics: ebpf.BpfFlowMetrics{
 			Bytes: 10, Packets: 1, StartMonoTimeTs: 1123, EndMonoTimeTs: 1123, Flags: 1,
+			DnsRecord: ebpf.BpfDnsRecordT{
+				ReqMonoTimeTs: 1123,
+				RspMonoTimeTs: 0,
+			},
 		},
 	}
 	inputs <- &RawRecord{
 		Id: k1,
 		Metrics: ebpf.BpfFlowMetrics{
 			Bytes: 10, Packets: 1, StartMonoTimeTs: 1456, EndMonoTimeTs: 1456, Flags: 1,
+			DnsRecord: ebpf.BpfDnsRecordT{
+				ReqMonoTimeTs: 1456,
+				RspMonoTimeTs: 0,
+			},
 		},
 	}
 
@@ -183,10 +229,15 @@ func TestEvict_Period(t *testing.T) {
 				StartMonoTimeTs: 123,
 				EndMonoTimeTs:   123,
 				Flags:           1,
+				DnsRecord: ebpf.BpfDnsRecordT{
+					ReqMonoTimeTs: 123,
+					RspMonoTimeTs: 0,
+				},
 			},
 		},
-		TimeFlowStart: now.Add(-1000 + 123),
-		TimeFlowEnd:   now.Add(-1000 + 123),
+		TimeFlowStart:  now.Add(-1000 + 123),
+		TimeFlowEnd:    now.Add(-1000 + 123),
+		TimeDNSRequest: now.Add(-1000 + 123),
 	}, *records[0])
 	records = receiveTimeout(t, evictor)
 	require.Len(t, records, 1)
@@ -199,10 +250,15 @@ func TestEvict_Period(t *testing.T) {
 				StartMonoTimeTs: 1123,
 				EndMonoTimeTs:   1123,
 				Flags:           1,
+				DnsRecord: ebpf.BpfDnsRecordT{
+					ReqMonoTimeTs: 1123,
+					RspMonoTimeTs: 0,
+				},
 			},
 		},
-		TimeFlowStart: now.Add(-1000 + 1123),
-		TimeFlowEnd:   now.Add(-1000 + 1123),
+		TimeFlowStart:  now.Add(-1000 + 1123),
+		TimeFlowEnd:    now.Add(-1000 + 1123),
+		TimeDNSRequest: now.Add(-1000 + 1123),
 	}, *records[0])
 
 	// no more flows are evicted
diff --git a/pkg/flow/record.go b/pkg/flow/record.go
index 60dc0148a76f7f7eaefd3581ea3e075617a54b0b..d8313b1cd2aaafa0940954b07793475570ee67c6 100644
--- a/pkg/flow/record.go
+++ b/pkg/flow/record.go
@@ -37,9 +37,11 @@ type RawRecord ebpf.BpfFlowRecordT
 type Record struct {
 	RawRecord
 	// TODO: redundant field from RecordMetrics. Reorganize structs
-	TimeFlowStart time.Time
-	TimeFlowEnd   time.Time
-	Interface     string
+	TimeFlowStart   time.Time
+	TimeFlowEnd     time.Time
+	TimeDNSRequest  time.Time
+	TimeDNSResponse time.Time
+	Interface       string
 	// Duplicate tells whether this flow has another duplicate so it has to be excluded from
 	// any metrics' aggregation (e.g. bytes/second rates between two pods).
 	// The reason for this field is that the same flow can be observed from multiple interfaces,
@@ -54,20 +56,30 @@ type Record struct {
 
 func NewRecord(
 	key ebpf.BpfFlowId,
-	metrics ebpf.BpfFlowMetrics,
+	metrics *ebpf.BpfFlowMetrics,
 	currentTime time.Time,
 	monotonicCurrentTime uint64,
 ) *Record {
 	startDelta := time.Duration(monotonicCurrentTime - metrics.StartMonoTimeTs)
 	endDelta := time.Duration(monotonicCurrentTime - metrics.EndMonoTimeTs)
-	return &Record{
+	var reqDNS, rspDNS time.Duration
+	var record = Record{
 		RawRecord: RawRecord{
 			Id:      key,
-			Metrics: metrics,
+			Metrics: *metrics,
 		},
 		TimeFlowStart: currentTime.Add(-startDelta),
 		TimeFlowEnd:   currentTime.Add(-endDelta),
 	}
+	if metrics.DnsRecord.ReqMonoTimeTs != 0 {
+		reqDNS = time.Duration(monotonicCurrentTime - metrics.DnsRecord.ReqMonoTimeTs)
+		record.TimeDNSRequest = currentTime.Add(-reqDNS)
+	}
+	if metrics.DnsRecord.RspMonoTimeTs != 0 {
+		rspDNS = time.Duration(monotonicCurrentTime - metrics.DnsRecord.RspMonoTimeTs)
+		record.TimeDNSResponse = currentTime.Add(-rspDNS)
+	}
+	return &record
 }
 
 // IP returns the net.IP equivalent object
diff --git a/pkg/flow/record_test.go b/pkg/flow/record_test.go
index 8f1fc59b087416a54044e5698c9a6eea9f2f0c75..fc6b0abed70f97b3aea053824d6ba6fae1a5c92e 100644
--- a/pkg/flow/record_test.go
+++ b/pkg/flow/record_test.go
@@ -32,7 +32,17 @@ func TestRecordBinaryEncoding(t *testing.T) {
 		0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, // u64 flow_end_time
 		0x13, 0x14, //flags
 		0x33, // u8 errno
-
+		// tcp_drops structure
+		0x10, 0x11, 0x12, 0x13, // u32 packets
+		0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, // u64 bytes
+		0x1c, 0x1d, //flags
+		0x1e,          // state
+		0x11, 0, 0, 0, // u32 latest drop cause
+		// dns_record structure
+		01, 00, // id
+		0x80, 00, // flags
+		0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, // req ts
+		0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, // rsp ts
 	}))
 	require.NoError(t, err)
 
@@ -58,6 +68,19 @@ func TestRecordBinaryEncoding(t *testing.T) {
 			EndMonoTimeTs:   0x1a19181716151413,
 			Flags:           0x1413,
 			Errno:           0x33,
+			TcpDrops: ebpf.BpfTcpDropsT{
+				Packets:         0x13121110,
+				Bytes:           0x1b1a191817161514,
+				LatestFlags:     0x1d1c,
+				LatestState:     0x1e,
+				LatestDropCause: 0x11,
+			},
+			DnsRecord: ebpf.BpfDnsRecordT{
+				Id:            0x0001,
+				Flags:         0x0080,
+				ReqMonoTimeTs: 0x1817161514131211,
+				RspMonoTimeTs: 0x2827262524232221,
+			},
 		},
 	}, *fr)
 	// assert that IP addresses are interpreted as IPv4 addresses
diff --git a/pkg/flow/tracer_map.go b/pkg/flow/tracer_map.go
index e067e34e062818527354265513f6ac09d3c6c299..8a01188939fee12174dcedd8c4941b3441167572 100644
--- a/pkg/flow/tracer_map.go
+++ b/pkg/flow/tracer_map.go
@@ -104,7 +104,7 @@ func (m *MapTracer) evictFlows(ctx context.Context, enableGC bool, forwardFlows
 		}
 		forwardingFlows = append(forwardingFlows, NewRecord(
 			flowKey,
-			*aggregatedMetrics,
+			aggregatedMetrics,
 			currentTime,
 			uint64(monotonicTimeNow),
 		))
diff --git a/pkg/grpc/grpc_test.go b/pkg/grpc/grpc_test.go
index efdd355bf08d3421784a8800c730faed5ac79681..4328dc174a41469688837b3c636d4aba264d98a6 100644
--- a/pkg/grpc/grpc_test.go
+++ b/pkg/grpc/grpc_test.go
@@ -139,9 +139,10 @@ func BenchmarkIPv4GRPCCommunication(b *testing.B) {
 	client := cc.Client()
 
 	f := &pbflow.Record{
-		EthProtocol:   2048,
-		Bytes:         456,
-		Flags:         1,
+		EthProtocol: 2048,
+		Bytes:       456,
+		Flags:       1,
+
 		Direction:     pbflow.Direction_EGRESS,
 		TimeFlowStart: timestamppb.Now(),
 		TimeFlowEnd:   timestamppb.Now(),
@@ -162,10 +163,15 @@ func BenchmarkIPv4GRPCCommunication(b *testing.B) {
 			SrcPort:  23000,
 			DstPort:  443,
 		},
-		Icmp: &pbflow.Icmp{
-			IcmpType: 8,
-			IcmpCode: 10,
-		},
+		IcmpType:               8,
+		IcmpCode:               10,
+		TcpDropBytes:           100,
+		TcpDropPackets:         1,
+		TcpDropLatestFlags:     1,
+		TcpDropLatestState:     2,
+		TcpDropLatestDropCause: 3,
+		TimeDnsReq:             timestamppb.Now(),
+		TimeDnsRsp:             timestamppb.Now(),
 	}
 	records := &pbflow.Records{}
 	for i := 0; i < 100; i++ {
@@ -215,10 +221,17 @@ func BenchmarkIPv6GRPCCommunication(b *testing.B) {
 			SrcPort:  23000,
 			DstPort:  443,
 		},
-		Icmp: &pbflow.Icmp{
-			IcmpType: 8,
-			IcmpCode: 10,
-		},
+		IcmpType:               8,
+		IcmpCode:               10,
+		TcpDropBytes:           100,
+		TcpDropPackets:         1,
+		TcpDropLatestFlags:     1,
+		TcpDropLatestState:     2,
+		TcpDropLatestDropCause: 3,
+		DnsId:                  1,
+		DnsFlags:               100,
+		TimeDnsReq:             timestamppb.Now(),
+		TimeDnsRsp:             timestamppb.Now(),
 	}
 	records := &pbflow.Records{}
 	for i := 0; i < 100; i++ {
diff --git a/pkg/pbflow/flow.pb.go b/pkg/pbflow/flow.pb.go
index af4bee082e66fafb30603ec9590c47271ec89791..60076f902b7fbd7269942076e72975466c261031 100644
--- a/pkg/pbflow/flow.pb.go
+++ b/pkg/pbflow/flow.pb.go
@@ -177,9 +177,19 @@ type Record struct {
 	// From all the duplicate flows, one will set this value to false and the rest will be true.
 	Duplicate bool `protobuf:"varint,11,opt,name=duplicate,proto3" json:"duplicate,omitempty"`
 	// Agent IP address to help identifying the source of the flow
-	AgentIp *IP    `protobuf:"bytes,12,opt,name=agent_ip,json=agentIp,proto3" json:"agent_ip,omitempty"`
-	Flags   uint32 `protobuf:"varint,13,opt,name=flags,proto3" json:"flags,omitempty"`
-	Icmp    *Icmp  `protobuf:"bytes,14,opt,name=icmp,proto3" json:"icmp,omitempty"`
+	AgentIp                *IP                    `protobuf:"bytes,12,opt,name=agent_ip,json=agentIp,proto3" json:"agent_ip,omitempty"`
+	Flags                  uint32                 `protobuf:"varint,13,opt,name=flags,proto3" json:"flags,omitempty"`
+	IcmpType               uint32                 `protobuf:"varint,14,opt,name=icmp_type,json=icmpType,proto3" json:"icmp_type,omitempty"`
+	IcmpCode               uint32                 `protobuf:"varint,15,opt,name=icmp_code,json=icmpCode,proto3" json:"icmp_code,omitempty"`
+	TcpDropBytes           uint64                 `protobuf:"varint,16,opt,name=tcp_drop_bytes,json=tcpDropBytes,proto3" json:"tcp_drop_bytes,omitempty"`
+	TcpDropPackets         uint64                 `protobuf:"varint,17,opt,name=tcp_drop_packets,json=tcpDropPackets,proto3" json:"tcp_drop_packets,omitempty"`
+	TcpDropLatestFlags     uint32                 `protobuf:"varint,18,opt,name=tcp_drop_latest_flags,json=tcpDropLatestFlags,proto3" json:"tcp_drop_latest_flags,omitempty"`
+	TcpDropLatestState     uint32                 `protobuf:"varint,19,opt,name=tcp_drop_latest_state,json=tcpDropLatestState,proto3" json:"tcp_drop_latest_state,omitempty"`
+	TcpDropLatestDropCause uint32                 `protobuf:"varint,20,opt,name=tcp_drop_latest_drop_cause,json=tcpDropLatestDropCause,proto3" json:"tcp_drop_latest_drop_cause,omitempty"`
+	DnsId                  uint32                 `protobuf:"varint,21,opt,name=dns_id,json=dnsId,proto3" json:"dns_id,omitempty"`
+	DnsFlags               uint32                 `protobuf:"varint,22,opt,name=dns_flags,json=dnsFlags,proto3" json:"dns_flags,omitempty"`
+	TimeDnsReq             *timestamppb.Timestamp `protobuf:"bytes,23,opt,name=time_dns_req,json=timeDnsReq,proto3" json:"time_dns_req,omitempty"`
+	TimeDnsRsp             *timestamppb.Timestamp `protobuf:"bytes,24,opt,name=time_dns_rsp,json=timeDnsRsp,proto3" json:"time_dns_rsp,omitempty"`
 }
 
 func (x *Record) Reset() {
@@ -305,9 +315,79 @@ func (x *Record) GetFlags() uint32 {
 	return 0
 }
 
-func (x *Record) GetIcmp() *Icmp {
+func (x *Record) GetIcmpType() uint32 {
 	if x != nil {
-		return x.Icmp
+		return x.IcmpType
+	}
+	return 0
+}
+
+func (x *Record) GetIcmpCode() uint32 {
+	if x != nil {
+		return x.IcmpCode
+	}
+	return 0
+}
+
+func (x *Record) GetTcpDropBytes() uint64 {
+	if x != nil {
+		return x.TcpDropBytes
+	}
+	return 0
+}
+
+func (x *Record) GetTcpDropPackets() uint64 {
+	if x != nil {
+		return x.TcpDropPackets
+	}
+	return 0
+}
+
+func (x *Record) GetTcpDropLatestFlags() uint32 {
+	if x != nil {
+		return x.TcpDropLatestFlags
+	}
+	return 0
+}
+
+func (x *Record) GetTcpDropLatestState() uint32 {
+	if x != nil {
+		return x.TcpDropLatestState
+	}
+	return 0
+}
+
+func (x *Record) GetTcpDropLatestDropCause() uint32 {
+	if x != nil {
+		return x.TcpDropLatestDropCause
+	}
+	return 0
+}
+
+func (x *Record) GetDnsId() uint32 {
+	if x != nil {
+		return x.DnsId
+	}
+	return 0
+}
+
+func (x *Record) GetDnsFlags() uint32 {
+	if x != nil {
+		return x.DnsFlags
+	}
+	return 0
+}
+
+func (x *Record) GetTimeDnsReq() *timestamppb.Timestamp {
+	if x != nil {
+		return x.TimeDnsReq
+	}
+	return nil
+}
+
+func (x *Record) GetTimeDnsRsp() *timestamppb.Timestamp {
+	if x != nil {
+		return x.TimeDnsRsp
 	}
 	return nil
 }
@@ -567,61 +647,6 @@ func (x *Transport) GetProtocol() uint32 {
 	return 0
 }
 
-type Icmp struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	IcmpType uint32 `protobuf:"varint,1,opt,name=icmp_type,json=icmpType,proto3" json:"icmp_type,omitempty"`
-	IcmpCode uint32 `protobuf:"varint,2,opt,name=icmp_code,json=icmpCode,proto3" json:"icmp_code,omitempty"`
-}
-
-func (x *Icmp) Reset() {
-	*x = Icmp{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_proto_flow_proto_msgTypes[7]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *Icmp) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Icmp) ProtoMessage() {}
-
-func (x *Icmp) ProtoReflect() protoreflect.Message {
-	mi := &file_proto_flow_proto_msgTypes[7]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use Icmp.ProtoReflect.Descriptor instead.
-func (*Icmp) Descriptor() ([]byte, []int) {
-	return file_proto_flow_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *Icmp) GetIcmpType() uint32 {
-	if x != nil {
-		return x.IcmpType
-	}
-	return 0
-}
-
-func (x *Icmp) GetIcmpCode() uint32 {
-	if x != nil {
-		return x.IcmpCode
-	}
-	return 0
-}
-
 var File_proto_flow_proto protoreflect.FileDescriptor
 
 var file_proto_flow_proto_rawDesc = []byte{
@@ -633,7 +658,7 @@ var file_proto_flow_proto_rawDesc = []byte{
 	0x07, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x28, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72,
 	0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x66, 0x6c,
 	0x6f, 0x77, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69,
-	0x65, 0x73, 0x22, 0xb6, 0x04, 0x0a, 0x06, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x21, 0x0a,
+	0x65, 0x73, 0x22, 0xf0, 0x07, 0x0a, 0x06, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x21, 0x0a,
 	0x0c, 0x65, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20,
 	0x01, 0x28, 0x0d, 0x52, 0x0b, 0x65, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c,
 	0x12, 0x2f, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20,
@@ -666,40 +691,64 @@ var file_proto_flow_proto_rawDesc = []byte{
 	0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x70, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a,
 	0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x52, 0x07, 0x61, 0x67, 0x65, 0x6e,
 	0x74, 0x49, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x0d, 0x20, 0x01,
-	0x28, 0x0d, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x20, 0x0a, 0x04, 0x69, 0x63, 0x6d,
-	0x70, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77,
-	0x2e, 0x49, 0x63, 0x6d, 0x70, 0x52, 0x04, 0x69, 0x63, 0x6d, 0x70, 0x22, 0x3c, 0x0a, 0x08, 0x44,
-	0x61, 0x74, 0x61, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x72, 0x63, 0x5f, 0x6d,
-	0x61, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x73, 0x72, 0x63, 0x4d, 0x61, 0x63,
-	0x12, 0x17, 0x0a, 0x07, 0x64, 0x73, 0x74, 0x5f, 0x6d, 0x61, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28,
-	0x04, 0x52, 0x06, 0x64, 0x73, 0x74, 0x4d, 0x61, 0x63, 0x22, 0x57, 0x0a, 0x07, 0x4e, 0x65, 0x74,
-	0x77, 0x6f, 0x72, 0x6b, 0x12, 0x25, 0x0a, 0x08, 0x73, 0x72, 0x63, 0x5f, 0x61, 0x64, 0x64, 0x72,
-	0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e,
-	0x49, 0x50, 0x52, 0x07, 0x73, 0x72, 0x63, 0x41, 0x64, 0x64, 0x72, 0x12, 0x25, 0x0a, 0x08, 0x64,
-	0x73, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e,
-	0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x52, 0x07, 0x64, 0x73, 0x74, 0x41, 0x64,
-	0x64, 0x72, 0x22, 0x3d, 0x0a, 0x02, 0x49, 0x50, 0x12, 0x14, 0x0a, 0x04, 0x69, 0x70, 0x76, 0x34,
-	0x18, 0x01, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x04, 0x69, 0x70, 0x76, 0x34, 0x12, 0x14,
-	0x0a, 0x04, 0x69, 0x70, 0x76, 0x36, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x04,
-	0x69, 0x70, 0x76, 0x36, 0x42, 0x0b, 0x0a, 0x09, 0x69, 0x70, 0x5f, 0x66, 0x61, 0x6d, 0x69, 0x6c,
-	0x79, 0x22, 0x5d, 0x0a, 0x09, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x19,
-	0x0a, 0x08, 0x73, 0x72, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d,
-	0x52, 0x07, 0x73, 0x72, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x64, 0x73, 0x74,
-	0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x64, 0x73, 0x74,
-	0x50, 0x6f, 0x72, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c,
-	0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c,
-	0x22, 0x40, 0x0a, 0x04, 0x49, 0x63, 0x6d, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x63, 0x6d, 0x70,
-	0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x69, 0x63, 0x6d,
-	0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x63, 0x6d, 0x70, 0x5f, 0x63, 0x6f,
-	0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x69, 0x63, 0x6d, 0x70, 0x43, 0x6f,
-	0x64, 0x65, 0x2a, 0x24, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12,
-	0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06,
-	0x45, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x01, 0x32, 0x3e, 0x0a, 0x09, 0x43, 0x6f, 0x6c, 0x6c,
-	0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x31, 0x0a, 0x04, 0x53, 0x65, 0x6e, 0x64, 0x12, 0x0f, 0x2e,
-	0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x1a, 0x16,
-	0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f,
-	0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x62,
-	0x66, 0x6c, 0x6f, 0x77, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+	0x28, 0x0d, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x63, 0x6d,
+	0x70, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x69, 0x63,
+	0x6d, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x63, 0x6d, 0x70, 0x5f, 0x63,
+	0x6f, 0x64, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x69, 0x63, 0x6d, 0x70, 0x43,
+	0x6f, 0x64, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x63, 0x70, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x5f,
+	0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x74, 0x63, 0x70,
+	0x44, 0x72, 0x6f, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x63, 0x70,
+	0x5f, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x11, 0x20,
+	0x01, 0x28, 0x04, 0x52, 0x0e, 0x74, 0x63, 0x70, 0x44, 0x72, 0x6f, 0x70, 0x50, 0x61, 0x63, 0x6b,
+	0x65, 0x74, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x74, 0x63, 0x70, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x5f,
+	0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x12, 0x20, 0x01,
+	0x28, 0x0d, 0x52, 0x12, 0x74, 0x63, 0x70, 0x44, 0x72, 0x6f, 0x70, 0x4c, 0x61, 0x74, 0x65, 0x73,
+	0x74, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x74, 0x63, 0x70, 0x5f, 0x64, 0x72,
+	0x6f, 0x70, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18,
+	0x13, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x74, 0x63, 0x70, 0x44, 0x72, 0x6f, 0x70, 0x4c, 0x61,
+	0x74, 0x65, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3a, 0x0a, 0x1a, 0x74, 0x63, 0x70,
+	0x5f, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x64, 0x72, 0x6f,
+	0x70, 0x5f, 0x63, 0x61, 0x75, 0x73, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x74,
+	0x63, 0x70, 0x44, 0x72, 0x6f, 0x70, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x44, 0x72, 0x6f, 0x70,
+	0x43, 0x61, 0x75, 0x73, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x64, 0x6e, 0x73, 0x5f, 0x69, 0x64, 0x18,
+	0x15, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x64, 0x6e, 0x73, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09,
+	0x64, 0x6e, 0x73, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0d, 0x52,
+	0x08, 0x64, 0x6e, 0x73, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x3c, 0x0a, 0x0c, 0x74, 0x69, 0x6d,
+	0x65, 0x5f, 0x64, 0x6e, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32,
+	0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x74, 0x69, 0x6d,
+	0x65, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x12, 0x3c, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x5f,
+	0x64, 0x6e, 0x73, 0x5f, 0x72, 0x73, 0x70, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+	0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x44,
+	0x6e, 0x73, 0x52, 0x73, 0x70, 0x22, 0x3c, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x4c, 0x69, 0x6e,
+	0x6b, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x72, 0x63, 0x5f, 0x6d, 0x61, 0x63, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x04, 0x52, 0x06, 0x73, 0x72, 0x63, 0x4d, 0x61, 0x63, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x73,
+	0x74, 0x5f, 0x6d, 0x61, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x64, 0x73, 0x74,
+	0x4d, 0x61, 0x63, 0x22, 0x57, 0x0a, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x25,
+	0x0a, 0x08, 0x73, 0x72, 0x63, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+	0x32, 0x0a, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x52, 0x07, 0x73, 0x72,
+	0x63, 0x41, 0x64, 0x64, 0x72, 0x12, 0x25, 0x0a, 0x08, 0x64, 0x73, 0x74, 0x5f, 0x61, 0x64, 0x64,
+	0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77,
+	0x2e, 0x49, 0x50, 0x52, 0x07, 0x64, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x22, 0x3d, 0x0a, 0x02,
+	0x49, 0x50, 0x12, 0x14, 0x0a, 0x04, 0x69, 0x70, 0x76, 0x34, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07,
+	0x48, 0x00, 0x52, 0x04, 0x69, 0x70, 0x76, 0x34, 0x12, 0x14, 0x0a, 0x04, 0x69, 0x70, 0x76, 0x36,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x04, 0x69, 0x70, 0x76, 0x36, 0x42, 0x0b,
+	0x0a, 0x09, 0x69, 0x70, 0x5f, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x22, 0x5d, 0x0a, 0x09, 0x54,
+	0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x72, 0x63, 0x5f,
+	0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x73, 0x72, 0x63, 0x50,
+	0x6f, 0x72, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x64, 0x73, 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x64, 0x73, 0x74, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1a,
+	0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d,
+	0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2a, 0x24, 0x0a, 0x09, 0x44, 0x69,
+	0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x47, 0x52, 0x45,
+	0x53, 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x01,
+	0x32, 0x3e, 0x0a, 0x09, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x31, 0x0a,
+	0x04, 0x53, 0x65, 0x6e, 0x64, 0x12, 0x0f, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x52,
+	0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x1a, 0x16, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e,
+	0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00,
+	0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x62, 0x06, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x33,
 }
 
 var (
@@ -715,7 +764,7 @@ func file_proto_flow_proto_rawDescGZIP() []byte {
 }
 
 var file_proto_flow_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_proto_flow_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
+var file_proto_flow_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
 var file_proto_flow_proto_goTypes = []interface{}{
 	(Direction)(0),                // 0: pbflow.Direction
 	(*CollectorReply)(nil),        // 1: pbflow.CollectorReply
@@ -725,28 +774,28 @@ var file_proto_flow_proto_goTypes = []interface{}{
 	(*Network)(nil),               // 5: pbflow.Network
 	(*IP)(nil),                    // 6: pbflow.IP
 	(*Transport)(nil),             // 7: pbflow.Transport
-	(*Icmp)(nil),                  // 8: pbflow.Icmp
-	(*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp
+	(*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp
 }
 var file_proto_flow_proto_depIdxs = []int32{
 	3,  // 0: pbflow.Records.entries:type_name -> pbflow.Record
 	0,  // 1: pbflow.Record.direction:type_name -> pbflow.Direction
-	9,  // 2: pbflow.Record.time_flow_start:type_name -> google.protobuf.Timestamp
-	9,  // 3: pbflow.Record.time_flow_end:type_name -> google.protobuf.Timestamp
+	8,  // 2: pbflow.Record.time_flow_start:type_name -> google.protobuf.Timestamp
+	8,  // 3: pbflow.Record.time_flow_end:type_name -> google.protobuf.Timestamp
 	4,  // 4: pbflow.Record.data_link:type_name -> pbflow.DataLink
 	5,  // 5: pbflow.Record.network:type_name -> pbflow.Network
 	7,  // 6: pbflow.Record.transport:type_name -> pbflow.Transport
 	6,  // 7: pbflow.Record.agent_ip:type_name -> pbflow.IP
-	8,  // 8: pbflow.Record.icmp:type_name -> pbflow.Icmp
-	6,  // 9: pbflow.Network.src_addr:type_name -> pbflow.IP
-	6,  // 10: pbflow.Network.dst_addr:type_name -> pbflow.IP
-	2,  // 11: pbflow.Collector.Send:input_type -> pbflow.Records
-	1,  // 12: pbflow.Collector.Send:output_type -> pbflow.CollectorReply
-	12, // [12:13] is the sub-list for method output_type
-	11, // [11:12] is the sub-list for method input_type
-	11, // [11:11] is the sub-list for extension type_name
-	11, // [11:11] is the sub-list for extension extendee
-	0,  // [0:11] is the sub-list for field type_name
+	8,  // 8: pbflow.Record.time_dns_req:type_name -> google.protobuf.Timestamp
+	8,  // 9: pbflow.Record.time_dns_rsp:type_name -> google.protobuf.Timestamp
+	6,  // 10: pbflow.Network.src_addr:type_name -> pbflow.IP
+	6,  // 11: pbflow.Network.dst_addr:type_name -> pbflow.IP
+	2,  // 12: pbflow.Collector.Send:input_type -> pbflow.Records
+	1,  // 13: pbflow.Collector.Send:output_type -> pbflow.CollectorReply
+	13, // [13:14] is the sub-list for method output_type
+	12, // [12:13] is the sub-list for method input_type
+	12, // [12:12] is the sub-list for extension type_name
+	12, // [12:12] is the sub-list for extension extendee
+	0,  // [0:12] is the sub-list for field type_name
 }
 
 func init() { file_proto_flow_proto_init() }
@@ -839,18 +888,6 @@ func file_proto_flow_proto_init() {
 				return nil
 			}
 		}
-		file_proto_flow_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Icmp); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
 	}
 	file_proto_flow_proto_msgTypes[5].OneofWrappers = []interface{}{
 		(*IP_Ipv4)(nil),
@@ -862,7 +899,7 @@ func file_proto_flow_proto_init() {
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: file_proto_flow_proto_rawDesc,
 			NumEnums:      1,
-			NumMessages:   8,
+			NumMessages:   7,
 			NumExtensions: 0,
 			NumServices:   1,
 		},
diff --git a/proto/flow.proto b/proto/flow.proto
index c7b459a1d1c489221093d17721769de1478a4121..fb854059fb8eb243edf4f30fc53c576605805e66 100644
--- a/proto/flow.proto
+++ b/proto/flow.proto
@@ -41,7 +41,17 @@ message Record {
   // Agent IP address to help identifying the source of the flow
   IP agent_ip = 12;
   uint32 flags = 13;
-  Icmp   icmp = 14;
+  uint32 icmp_type = 14;
+  uint32 icmp_code = 15;
+  uint64 tcp_drop_bytes = 16;
+  uint64 tcp_drop_packets = 17;
+  uint32 tcp_drop_latest_flags = 18;
+  uint32 tcp_drop_latest_state = 19;
+  uint32 tcp_drop_latest_drop_cause = 20;
+  uint32 dns_id = 21;
+  uint32 dns_flags = 22;
+  google.protobuf.Timestamp time_dns_req = 23;
+  google.protobuf.Timestamp time_dns_rsp = 24;
 }
 
 message DataLink {
@@ -69,11 +79,6 @@ message Transport {
   uint32 protocol = 3;
 }
 
-message Icmp {
-  uint32 icmp_type = 1;
-  uint32 icmp_code = 2;
-}
-
 // as defined by field 61 in
 // https://www.iana.org/assignments/ipfix/ipfix.xhtml
 enum Direction {
diff --git a/vendor/github.com/cilium/ebpf/link/cgroup.go b/vendor/github.com/cilium/ebpf/link/cgroup.go
new file mode 100644
index 0000000000000000000000000000000000000000..bfad1ccedfb5fa85aeafa3c809ae02d02885197b
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/cgroup.go
@@ -0,0 +1,165 @@
+package link
+
+import (
+	"errors"
+	"fmt"
+	"os"
+
+	"github.com/cilium/ebpf"
+)
+
+type cgroupAttachFlags uint32
+
+// cgroup attach flags
+const (
+	flagAllowOverride cgroupAttachFlags = 1 << iota
+	flagAllowMulti
+	flagReplace
+)
+
+type CgroupOptions struct {
+	// Path to a cgroupv2 folder.
+	Path string
+	// One of the AttachCgroup* constants
+	Attach ebpf.AttachType
+	// Program must be of type CGroup*, and the attach type must match Attach.
+	Program *ebpf.Program
+}
+
+// AttachCgroup links a BPF program to a cgroup.
+func AttachCgroup(opts CgroupOptions) (Link, error) {
+	cgroup, err := os.Open(opts.Path)
+	if err != nil {
+		return nil, fmt.Errorf("can't open cgroup: %s", err)
+	}
+
+	clone, err := opts.Program.Clone()
+	if err != nil {
+		cgroup.Close()
+		return nil, err
+	}
+
+	var cg Link
+	cg, err = newLinkCgroup(cgroup, opts.Attach, clone)
+	if errors.Is(err, ErrNotSupported) {
+		cg, err = newProgAttachCgroup(cgroup, opts.Attach, clone, flagAllowMulti)
+	}
+	if errors.Is(err, ErrNotSupported) {
+		cg, err = newProgAttachCgroup(cgroup, opts.Attach, clone, flagAllowOverride)
+	}
+	if err != nil {
+		cgroup.Close()
+		clone.Close()
+		return nil, err
+	}
+
+	return cg, nil
+}
+
+type progAttachCgroup struct {
+	cgroup     *os.File
+	current    *ebpf.Program
+	attachType ebpf.AttachType
+	flags      cgroupAttachFlags
+}
+
+var _ Link = (*progAttachCgroup)(nil)
+
+func (cg *progAttachCgroup) isLink() {}
+
+func newProgAttachCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program, flags cgroupAttachFlags) (*progAttachCgroup, error) {
+	if flags&flagAllowMulti > 0 {
+		if err := haveProgAttachReplace(); err != nil {
+			return nil, fmt.Errorf("can't support multiple programs: %w", err)
+		}
+	}
+
+	err := RawAttachProgram(RawAttachProgramOptions{
+		Target:  int(cgroup.Fd()),
+		Program: prog,
+		Flags:   uint32(flags),
+		Attach:  attach,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("cgroup: %w", err)
+	}
+
+	return &progAttachCgroup{cgroup, prog, attach, flags}, nil
+}
+
+func (cg *progAttachCgroup) Close() error {
+	defer cg.cgroup.Close()
+	defer cg.current.Close()
+
+	err := RawDetachProgram(RawDetachProgramOptions{
+		Target:  int(cg.cgroup.Fd()),
+		Program: cg.current,
+		Attach:  cg.attachType,
+	})
+	if err != nil {
+		return fmt.Errorf("close cgroup: %s", err)
+	}
+	return nil
+}
+
+func (cg *progAttachCgroup) Update(prog *ebpf.Program) error {
+	new, err := prog.Clone()
+	if err != nil {
+		return err
+	}
+
+	args := RawAttachProgramOptions{
+		Target:  int(cg.cgroup.Fd()),
+		Program: prog,
+		Attach:  cg.attachType,
+		Flags:   uint32(cg.flags),
+	}
+
+	if cg.flags&flagAllowMulti > 0 {
+		// Atomically replacing multiple programs requires at least
+		// 5.5 (commit 7dd68b3279f17921 "bpf: Support replacing cgroup-bpf
+		// program in MULTI mode")
+		args.Flags |= uint32(flagReplace)
+		args.Replace = cg.current
+	}
+
+	if err := RawAttachProgram(args); err != nil {
+		new.Close()
+		return fmt.Errorf("can't update cgroup: %s", err)
+	}
+
+	cg.current.Close()
+	cg.current = new
+	return nil
+}
+
+func (cg *progAttachCgroup) Pin(string) error {
+	return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported)
+}
+
+func (cg *progAttachCgroup) Unpin() error {
+	return fmt.Errorf("can't unpin cgroup: %w", ErrNotSupported)
+}
+
+func (cg *progAttachCgroup) Info() (*Info, error) {
+	return nil, fmt.Errorf("can't get cgroup info: %w", ErrNotSupported)
+}
+
+type linkCgroup struct {
+	RawLink
+}
+
+var _ Link = (*linkCgroup)(nil)
+
+func newLinkCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program) (*linkCgroup, error) {
+	link, err := AttachRawLink(RawLinkOptions{
+		Target:  int(cgroup.Fd()),
+		Program: prog,
+		Attach:  attach,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return &linkCgroup{*link}, err
+}
diff --git a/vendor/github.com/cilium/ebpf/link/doc.go b/vendor/github.com/cilium/ebpf/link/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..2bde35ed7a2652782e2af666855ee00730495cbc
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/doc.go
@@ -0,0 +1,2 @@
+// Package link allows attaching eBPF programs to various kernel hooks.
+package link
diff --git a/vendor/github.com/cilium/ebpf/link/iter.go b/vendor/github.com/cilium/ebpf/link/iter.go
new file mode 100644
index 0000000000000000000000000000000000000000..d2b32ef331cd9ef385149fc13f78afb803f05f1b
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/iter.go
@@ -0,0 +1,85 @@
+package link
+
+import (
+	"fmt"
+	"io"
+	"unsafe"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/internal/sys"
+)
+
+type IterOptions struct {
+	// Program must be of type Tracing with attach type
+	// AttachTraceIter. The kind of iterator to attach to is
+	// determined at load time via the AttachTo field.
+	//
+	// AttachTo requires the kernel to include BTF of itself,
+	// and it to be compiled with a recent pahole (>= 1.16).
+	Program *ebpf.Program
+
+	// Map specifies the target map for bpf_map_elem and sockmap iterators.
+	// It may be nil.
+	Map *ebpf.Map
+}
+
+// AttachIter attaches a BPF seq_file iterator.
+func AttachIter(opts IterOptions) (*Iter, error) {
+	if err := haveBPFLink(); err != nil {
+		return nil, err
+	}
+
+	progFd := opts.Program.FD()
+	if progFd < 0 {
+		return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd)
+	}
+
+	var info bpfIterLinkInfoMap
+	if opts.Map != nil {
+		mapFd := opts.Map.FD()
+		if mapFd < 0 {
+			return nil, fmt.Errorf("invalid map: %w", sys.ErrClosedFd)
+		}
+		info.map_fd = uint32(mapFd)
+	}
+
+	attr := sys.LinkCreateIterAttr{
+		ProgFd:      uint32(progFd),
+		AttachType:  sys.AttachType(ebpf.AttachTraceIter),
+		IterInfo:    sys.NewPointer(unsafe.Pointer(&info)),
+		IterInfoLen: uint32(unsafe.Sizeof(info)),
+	}
+
+	fd, err := sys.LinkCreateIter(&attr)
+	if err != nil {
+		return nil, fmt.Errorf("can't link iterator: %w", err)
+	}
+
+	return &Iter{RawLink{fd, ""}}, err
+}
+
+// Iter represents an attached bpf_iter.
+type Iter struct {
+	RawLink
+}
+
+// Open creates a new instance of the iterator.
+//
+// Reading from the returned reader triggers the BPF program.
+func (it *Iter) Open() (io.ReadCloser, error) {
+	attr := &sys.IterCreateAttr{
+		LinkFd: it.fd.Uint(),
+	}
+
+	fd, err := sys.IterCreate(attr)
+	if err != nil {
+		return nil, fmt.Errorf("can't create iterator: %w", err)
+	}
+
+	return fd.File("bpf_iter"), nil
+}
+
+// union bpf_iter_link_info.map
+type bpfIterLinkInfoMap struct {
+	map_fd uint32
+}
diff --git a/vendor/github.com/cilium/ebpf/link/kprobe.go b/vendor/github.com/cilium/ebpf/link/kprobe.go
new file mode 100644
index 0000000000000000000000000000000000000000..9ce7eb4a4e2be6af993613e2d0ad3e44096f17d4
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/kprobe.go
@@ -0,0 +1,574 @@
+package link
+
+import (
+	"crypto/rand"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"syscall"
+	"unsafe"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/internal/sys"
+	"github.com/cilium/ebpf/internal/unix"
+)
+
+var (
+	kprobeEventsPath = filepath.Join(tracefsPath, "kprobe_events")
+)
+
+type probeType uint8
+
+type probeArgs struct {
+	symbol, group, path          string
+	offset, refCtrOffset, cookie uint64
+	pid, retprobeMaxActive       int
+	ret                          bool
+}
+
+// KprobeOptions defines additional parameters that will be used
+// when loading Kprobes.
+type KprobeOptions struct {
+	// Arbitrary value that can be fetched from an eBPF program
+	// via `bpf_get_attach_cookie()`.
+	//
+	// Needs kernel 5.15+.
+	Cookie uint64
+	// Offset of the kprobe relative to the traced symbol.
+	// Can be used to insert kprobes at arbitrary offsets in kernel functions,
+	// e.g. in places where functions have been inlined.
+	Offset uint64
+	// Increase the maximum number of concurrent invocations of a kretprobe.
+	// Required when tracing some long running functions in the kernel.
+	//
+	// Deprecated: this setting forces the use of an outdated kernel API and is not portable
+	// across kernel versions.
+	RetprobeMaxActive int
+}
+
+const (
+	kprobeType probeType = iota
+	uprobeType
+)
+
+func (pt probeType) String() string {
+	if pt == kprobeType {
+		return "kprobe"
+	}
+	return "uprobe"
+}
+
+func (pt probeType) EventsPath() string {
+	if pt == kprobeType {
+		return kprobeEventsPath
+	}
+	return uprobeEventsPath
+}
+
+func (pt probeType) PerfEventType(ret bool) perfEventType {
+	if pt == kprobeType {
+		if ret {
+			return kretprobeEvent
+		}
+		return kprobeEvent
+	}
+	if ret {
+		return uretprobeEvent
+	}
+	return uprobeEvent
+}
+
+// Kprobe attaches the given eBPF program to a perf event that fires when the
+// given kernel symbol starts executing. See /proc/kallsyms for available
+// symbols. For example, printk():
+//
+//	kp, err := Kprobe("printk", prog, nil)
+//
+// Losing the reference to the resulting Link (kp) will close the Kprobe
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+func Kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) {
+	k, err := kprobe(symbol, prog, opts, false)
+	if err != nil {
+		return nil, err
+	}
+
+	lnk, err := attachPerfEvent(k, prog)
+	if err != nil {
+		k.Close()
+		return nil, err
+	}
+
+	return lnk, nil
+}
+
+// Kretprobe attaches the given eBPF program to a perf event that fires right
+// before the given kernel symbol exits, with the function stack left intact.
+// See /proc/kallsyms for available symbols. For example, printk():
+//
+//	kp, err := Kretprobe("printk", prog, nil)
+//
+// Losing the reference to the resulting Link (kp) will close the Kretprobe
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// On kernels 5.10 and earlier, setting a kretprobe on a nonexistent symbol
+// incorrectly returns unix.EINVAL instead of os.ErrNotExist.
+func Kretprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) {
+	k, err := kprobe(symbol, prog, opts, true)
+	if err != nil {
+		return nil, err
+	}
+
+	lnk, err := attachPerfEvent(k, prog)
+	if err != nil {
+		k.Close()
+		return nil, err
+	}
+
+	return lnk, nil
+}
+
+// isValidKprobeSymbol implements the equivalent of a regex match
+// against "^[a-zA-Z_][0-9a-zA-Z_.]*$".
+func isValidKprobeSymbol(s string) bool {
+	if len(s) < 1 {
+		return false
+	}
+
+	for i, c := range []byte(s) {
+		switch {
+		case c >= 'a' && c <= 'z':
+		case c >= 'A' && c <= 'Z':
+		case c == '_':
+		case i > 0 && c >= '0' && c <= '9':
+
+		// Allow `.` in symbol name. GCC-compiled kernel may change symbol name
+		// to have a `.isra.$n` suffix, like `udp_send_skb.isra.52`.
+		// See: https://gcc.gnu.org/gcc-10/changes.html
+		case i > 0 && c == '.':
+
+		default:
+			return false
+		}
+	}
+
+	return true
+}
+
+// kprobe opens a perf event on the given symbol and attaches prog to it.
+// If ret is true, create a kretprobe.
+func kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions, ret bool) (*perfEvent, error) {
+	if symbol == "" {
+		return nil, fmt.Errorf("symbol name cannot be empty: %w", errInvalidInput)
+	}
+	if prog == nil {
+		return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
+	}
+	if !isValidKprobeSymbol(symbol) {
+		return nil, fmt.Errorf("symbol '%s' must be a valid symbol in /proc/kallsyms: %w", symbol, errInvalidInput)
+	}
+	if prog.Type() != ebpf.Kprobe {
+		return nil, fmt.Errorf("eBPF program type %s is not a Kprobe: %w", prog.Type(), errInvalidInput)
+	}
+
+	args := probeArgs{
+		pid:    perfAllThreads,
+		symbol: symbol,
+		ret:    ret,
+	}
+
+	if opts != nil {
+		args.retprobeMaxActive = opts.RetprobeMaxActive
+		args.cookie = opts.Cookie
+		args.offset = opts.Offset
+	}
+
+	// Use kprobe PMU if the kernel has it available.
+	tp, err := pmuKprobe(args)
+	if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) {
+		args.symbol = platformPrefix(symbol)
+		tp, err = pmuKprobe(args)
+	}
+	if err == nil {
+		return tp, nil
+	}
+	if err != nil && !errors.Is(err, ErrNotSupported) {
+		return nil, fmt.Errorf("creating perf_kprobe PMU: %w", err)
+	}
+
+	// Use tracefs if kprobe PMU is missing.
+	args.symbol = symbol
+	tp, err = tracefsKprobe(args)
+	if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) {
+		args.symbol = platformPrefix(symbol)
+		tp, err = tracefsKprobe(args)
+	}
+	if err != nil {
+		return nil, fmt.Errorf("creating trace event '%s' in tracefs: %w", symbol, err)
+	}
+
+	return tp, nil
+}
+
+// pmuKprobe opens a perf event based on the kprobe PMU.
+// Returns os.ErrNotExist if the given symbol does not exist in the kernel.
+func pmuKprobe(args probeArgs) (*perfEvent, error) {
+	return pmuProbe(kprobeType, args)
+}
+
+// pmuProbe opens a perf event based on a Performance Monitoring Unit.
+//
+// Requires at least a 4.17 kernel.
+// e12f03d7031a "perf/core: Implement the 'perf_kprobe' PMU"
+// 33ea4b24277b "perf/core: Implement the 'perf_uprobe' PMU"
+//
+// Returns ErrNotSupported if the kernel doesn't support perf_[k,u]probe PMU
+func pmuProbe(typ probeType, args probeArgs) (*perfEvent, error) {
+	// Getting the PMU type will fail if the kernel doesn't support
+	// the perf_[k,u]probe PMU.
+	et, err := readUint64FromFileOnce("%d\n", "/sys/bus/event_source/devices", typ.String(), "type")
+	if errors.Is(err, os.ErrNotExist) {
+		return nil, fmt.Errorf("%s: %w", typ, ErrNotSupported)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	// Use tracefs if we want to set kretprobe's retprobeMaxActive.
+	if args.retprobeMaxActive != 0 {
+		return nil, fmt.Errorf("pmu probe: non-zero retprobeMaxActive: %w", ErrNotSupported)
+	}
+
+	var config uint64
+	if args.ret {
+		bit, err := readUint64FromFileOnce("config:%d\n", "/sys/bus/event_source/devices", typ.String(), "/format/retprobe")
+		if err != nil {
+			return nil, err
+		}
+		config |= 1 << bit
+	}
+
+	var (
+		attr  unix.PerfEventAttr
+		sp    unsafe.Pointer
+		token string
+	)
+	switch typ {
+	case kprobeType:
+		// Create a pointer to a NUL-terminated string for the kernel.
+		sp, err = unsafeStringPtr(args.symbol)
+		if err != nil {
+			return nil, err
+		}
+
+		token = kprobeToken(args)
+
+		attr = unix.PerfEventAttr{
+			// The minimum size required for PMU kprobes is PERF_ATTR_SIZE_VER1,
+			// since it added the config2 (Ext2) field. Use Ext2 as probe_offset.
+			Size:   unix.PERF_ATTR_SIZE_VER1,
+			Type:   uint32(et),          // PMU event type read from sysfs
+			Ext1:   uint64(uintptr(sp)), // Kernel symbol to trace
+			Ext2:   args.offset,         // Kernel symbol offset
+			Config: config,              // Retprobe flag
+		}
+	case uprobeType:
+		sp, err = unsafeStringPtr(args.path)
+		if err != nil {
+			return nil, err
+		}
+
+		if args.refCtrOffset != 0 {
+			config |= args.refCtrOffset << uprobeRefCtrOffsetShift
+		}
+
+		token = uprobeToken(args)
+
+		attr = unix.PerfEventAttr{
+			// The minimum size required for PMU uprobes is PERF_ATTR_SIZE_VER1,
+			// since it added the config2 (Ext2) field. The Size field controls the
+			// size of the internal buffer the kernel allocates for reading the
+			// perf_event_attr argument from userspace.
+			Size:   unix.PERF_ATTR_SIZE_VER1,
+			Type:   uint32(et),          // PMU event type read from sysfs
+			Ext1:   uint64(uintptr(sp)), // Uprobe path
+			Ext2:   args.offset,         // Uprobe offset
+			Config: config,              // RefCtrOffset, Retprobe flag
+		}
+	}
+
+	rawFd, err := unix.PerfEventOpen(&attr, args.pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC)
+
+	// On some old kernels, kprobe PMU doesn't allow `.` in symbol names and
+	// return -EINVAL. Return ErrNotSupported to allow falling back to tracefs.
+	// https://github.com/torvalds/linux/blob/94710cac0ef4/kernel/trace/trace_kprobe.c#L340-L343
+	if errors.Is(err, unix.EINVAL) && strings.Contains(args.symbol, ".") {
+		return nil, fmt.Errorf("token %s: older kernels don't accept dots: %w", token, ErrNotSupported)
+	}
+	// Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL
+	// when trying to create a retprobe for a missing symbol.
+	if errors.Is(err, os.ErrNotExist) {
+		return nil, fmt.Errorf("token %s: not found: %w", token, err)
+	}
+	// Since commit ab105a4fb894, EILSEQ is returned when a kprobe sym+offset is resolved
+	// to an invalid insn boundary. The exact conditions that trigger this error are
+	// arch specific however.
+	if errors.Is(err, unix.EILSEQ) {
+		return nil, fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist)
+	}
+	// Since at least commit cb9a19fe4aa51, ENOTSUPP is returned
+	// when attempting to set a uprobe on a trap instruction.
+	if errors.Is(err, sys.ENOTSUPP) {
+		return nil, fmt.Errorf("token %s: failed setting uprobe on offset %#x (possible trap insn): %w", token, args.offset, err)
+	}
+
+	if err != nil {
+		return nil, fmt.Errorf("token %s: opening perf event: %w", token, err)
+	}
+
+	// Ensure the string pointer is not collected before PerfEventOpen returns.
+	runtime.KeepAlive(sp)
+
+	fd, err := sys.NewFD(rawFd)
+	if err != nil {
+		return nil, err
+	}
+
+	// Kernel has perf_[k,u]probe PMU available, initialize perf event.
+	return &perfEvent{
+		typ:    typ.PerfEventType(args.ret),
+		name:   args.symbol,
+		pmuID:  et,
+		cookie: args.cookie,
+		fd:     fd,
+	}, nil
+}
+
+// tracefsKprobe creates a Kprobe tracefs entry.
+//
+// It simply wraps tracefsProbe with kprobeType; see tracefsProbe for the
+// creation mechanics and cleanup semantics.
+func tracefsKprobe(args probeArgs) (*perfEvent, error) {
+	return tracefsProbe(kprobeType, args)
+}
+
+// tracefsProbe creates a trace event by writing an entry to <tracefs>/[k,u]probe_events.
+// A new trace event group name is generated on every call to support creating
+// multiple trace events for the same kernel or userspace symbol.
+// Path and offset are only set in the case of uprobe(s) and are used to set
+// the executable/library path on the filesystem and the offset where the probe is inserted.
+// A perf event is then opened on the newly-created trace event and returned to the caller.
+func tracefsProbe(typ probeType, args probeArgs) (*perfEvent, error) {
+	// Generate a random string for each trace event we attempt to create.
+	// This value is used as the 'group' token in tracefs to allow creating
+	// multiple kprobe trace events with the same name.
+	group, err := randomGroup("ebpf")
+	if err != nil {
+		return nil, fmt.Errorf("randomizing group name: %w", err)
+	}
+	args.group = group
+
+	// Create the [k,u]probe trace event using tracefs.
+	tid, err := createTraceFSProbeEvent(typ, args)
+	if err != nil {
+		return nil, fmt.Errorf("creating probe entry on tracefs: %w", err)
+	}
+
+	// Kprobes are ephemeral tracepoints and share the same perf event type.
+	fd, err := openTracepointPerfEvent(tid, args.pid)
+	if err != nil {
+		// Make sure we clean up the created tracefs event when we return error.
+		// If a livepatch handler is already active on the symbol, the write to
+		// tracefs will succeed, a trace event will show up, but creating the
+		// perf event will fail with EBUSY.
+		_ = closeTraceFSProbeEvent(typ, args.group, args.symbol)
+		return nil, err
+	}
+
+	// The returned perfEvent owns both the tracefs entry (identified by
+	// group/name and tracefsID) and the perf event fd; both are released by
+	// perfEvent.Close.
+	return &perfEvent{
+		typ:       typ.PerfEventType(args.ret),
+		group:     group,
+		name:      args.symbol,
+		tracefsID: tid,
+		cookie:    args.cookie,
+		fd:        fd,
+	}, nil
+}
+
+// errInvalidMaxActive is returned when retprobeMaxActive is requested for
+// anything other than a kretprobe.
+var errInvalidMaxActive = errors.New("can only set maxactive on kretprobes")
+
+// createTraceFSProbeEvent creates a new ephemeral trace event.
+//
+// Returns os.ErrNotExist if symbol is not a valid
+// kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist
+// if a probe with the same group and symbol already exists. Returns an error if
+// args.retprobeMaxActive is used on non kprobe types. Returns ErrNotSupported if
+// the kernel is too old to support kretprobe maxactive.
+func createTraceFSProbeEvent(typ probeType, args probeArgs) (uint64, error) {
+	// Before attempting to create a trace event through tracefs,
+	// check if an event with the same group and name already exists.
+	// Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate
+	// entry, so we need to rely on reads for detecting uniqueness.
+	_, err := getTraceEventID(args.group, args.symbol)
+	if err == nil {
+		return 0, fmt.Errorf("trace event %s/%s: %w", args.group, args.symbol, os.ErrExist)
+	}
+	if err != nil && !errors.Is(err, os.ErrNotExist) {
+		return 0, fmt.Errorf("checking trace event %s/%s: %w", args.group, args.symbol, err)
+	}
+
+	// Open the kprobe_events file in tracefs.
+	f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666)
+	if err != nil {
+		return 0, fmt.Errorf("error opening '%s': %w", typ.EventsPath(), err)
+	}
+	defer f.Close()
+
+	var pe, token string
+	switch typ {
+	case kprobeType:
+		// The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt):
+		// p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe
+		// r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe
+		// -:[GRP/]EVENT                                        : Clear a probe
+		//
+		// Some examples:
+		// r:ebpf_1234/r_my_kretprobe nf_conntrack_destroy
+		// p:ebpf_5678/p_my_kprobe __x64_sys_execve
+		//
+		// Leaving the kretprobe's MAXACTIVE set to 0 (or absent) will make the
+		// kernel default to NR_CPUS. This is desired in most eBPF cases since
+		// subsampling or rate limiting logic can be more accurately implemented in
+		// the eBPF program itself.
+		// See Documentation/kprobes.txt for more details.
+		if args.retprobeMaxActive != 0 && !args.ret {
+			return 0, errInvalidMaxActive
+		}
+		token = kprobeToken(args)
+		pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret, args.retprobeMaxActive), args.group, sanitizeSymbol(args.symbol), token)
+	case uprobeType:
+		// The uprobe_events syntax is as follows:
+		// p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe
+		// r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a return probe
+		// -:[GRP/]EVENT                           : Clear a probe
+		//
+		// Some examples:
+		// r:ebpf_1234/readline /bin/bash:0x12345
+		// p:ebpf_5678/main_mySymbol /bin/mybin:0x12345(0x123)
+		//
+		// See Documentation/trace/uprobetracer.txt for more details.
+		if args.retprobeMaxActive != 0 {
+			return 0, errInvalidMaxActive
+		}
+		token = uprobeToken(args)
+		pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret, 0), args.group, args.symbol, token)
+	}
+	// Write the probe definition; the kernel validates it on write and any
+	// symbol/offset problems surface as errnos mapped below.
+	_, err = f.WriteString(pe)
+
+	// Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL
+	// when trying to create a retprobe for a missing symbol.
+	if errors.Is(err, os.ErrNotExist) {
+		return 0, fmt.Errorf("token %s: not found: %w", token, err)
+	}
+	// Since commit ab105a4fb894, EILSEQ is returned when a kprobe sym+offset is resolved
+	// to an invalid insn boundary. The exact conditions that trigger this error are
+	// arch specific however.
+	if errors.Is(err, syscall.EILSEQ) {
+		return 0, fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist)
+	}
+	// ERANGE is returned when the `SYM[+offs]` token is too big and cannot
+	// be resolved.
+	if errors.Is(err, syscall.ERANGE) {
+		return 0, fmt.Errorf("token %s: offset too big: %w", token, os.ErrNotExist)
+	}
+
+	if err != nil {
+		return 0, fmt.Errorf("token %s: writing '%s': %w", token, pe, err)
+	}
+
+	// Get the newly-created trace event's id.
+	tid, err := getTraceEventID(args.group, args.symbol)
+	if args.retprobeMaxActive != 0 && errors.Is(err, os.ErrNotExist) {
+		// Kernels < 4.12 don't support maxactive and therefore auto generate
+		// group and event names from the symbol and offset. The symbol is used
+		// without any sanitization.
+		// See https://elixir.bootlin.com/linux/v4.10/source/kernel/trace/trace_kprobe.c#L712
+		event := fmt.Sprintf("kprobes/r_%s_%d", args.symbol, args.offset)
+		if err := removeTraceFSProbeEvent(typ, event); err != nil {
+			return 0, fmt.Errorf("failed to remove spurious maxactive event: %s", err)
+		}
+		return 0, fmt.Errorf("create trace event with non-default maxactive: %w", ErrNotSupported)
+	}
+	if err != nil {
+		return 0, fmt.Errorf("get trace event id: %w", err)
+	}
+
+	return tid, nil
+}
+
+// closeTraceFSProbeEvent removes the [k,u]probe identified by type, group and
+// symbol from <tracefs>/[k,u]probe_events. The symbol is sanitized the same
+// way it was at creation time so the token matches the existing entry.
+func closeTraceFSProbeEvent(typ probeType, group, symbol string) error {
+	event := group + "/" + sanitizeSymbol(symbol)
+	return removeTraceFSProbeEvent(typ, event)
+}
+
+// removeTraceFSProbeEvent clears a single trace event by appending a
+// "-:GROUP/EVENT" line to the matching [k,u]probe_events file.
+func removeTraceFSProbeEvent(typ probeType, pe string) error {
+	events, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666)
+	if err != nil {
+		return fmt.Errorf("error opening %s: %w", typ.EventsPath(), err)
+	}
+	defer events.Close()
+
+	// See [k,u]probe_events syntax above. The probe type prefix (p/r) is not
+	// needed for removals; a leading "-:" selects the entry to delete.
+	_, err = events.WriteString("-:" + pe)
+	if err != nil {
+		return fmt.Errorf("remove event %q from %s: %w", pe, typ.EventsPath(), err)
+	}
+
+	return nil
+}
+
+// randomGroup generates a pseudorandom string for use as a tracefs group name.
+// Returns an error when the output string would exceed 63 characters (kernel
+// limitation), when rand.Read() fails or when prefix contains characters not
+// allowed by isValidTraceID.
+func randomGroup(prefix string) (string, error) {
+	if !isValidTraceID(prefix) {
+		return "", fmt.Errorf("prefix '%s' must be alphanumeric or underscore: %w", prefix, errInvalidInput)
+	}
+
+	b := make([]byte, 8)
+	if _, err := rand.Read(b); err != nil {
+		return "", fmt.Errorf("reading random bytes: %w", err)
+	}
+
+	group := fmt.Sprintf("%s_%x", prefix, b)
+	if len(group) > 63 {
+		return "", fmt.Errorf("group name '%s' cannot be longer than 63 characters: %w", group, errInvalidInput)
+	}
+
+	return group, nil
+}
+
+// probePrefix returns the tracefs command prefix for a probe definition:
+// "p" for entry probes, "r" for return probes, and "r<maxActive>" when a
+// non-default maxactive value is requested for a return probe.
+func probePrefix(ret bool, maxActive int) string {
+	if !ret {
+		return "p"
+	}
+	if maxActive > 0 {
+		return fmt.Sprintf("r%d", maxActive)
+	}
+	return "r"
+}
+
+// kprobeToken creates the SYM[+offs] token for the tracefs api.
+// The offset suffix is only emitted when a non-zero offset was requested.
+func kprobeToken(args probeArgs) string {
+	if args.offset == 0 {
+		return args.symbol
+	}
+	return fmt.Sprintf("%s+%#x", args.symbol, args.offset)
+}
diff --git a/vendor/github.com/cilium/ebpf/link/kprobe_multi.go b/vendor/github.com/cilium/ebpf/link/kprobe_multi.go
new file mode 100644
index 0000000000000000000000000000000000000000..151f47d6687aae6e01628a17879b6f5f9832f345
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/kprobe_multi.go
@@ -0,0 +1,180 @@
+package link
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"unsafe"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/asm"
+	"github.com/cilium/ebpf/internal"
+	"github.com/cilium/ebpf/internal/sys"
+	"github.com/cilium/ebpf/internal/unix"
+)
+
+// KprobeMultiOptions defines additional parameters that will be used
+// when opening a KprobeMulti Link.
+type KprobeMultiOptions struct {
+	// Symbols takes a list of kernel symbol names to attach an ebpf program to.
+	//
+	// Mutually exclusive with Addresses.
+	Symbols []string
+
+	// Addresses takes a list of kernel symbol addresses in case they can not
+	// be referred to by name.
+	//
+	// Note that only start addresses can be specified, since the fprobe API
+	// limits the attach point to the function entry or return.
+	//
+	// Mutually exclusive with Symbols.
+	Addresses []uint64
+
+	// Cookies specifies arbitrary values that can be fetched from an eBPF
+	// program via `bpf_get_attach_cookie()`.
+	//
+	// If set, its length should be equal to the length of Symbols or Addresses.
+	// Each Cookie is assigned to the Symbol or Address specified at the
+	// corresponding slice index.
+	//
+	// Leaving Cookies empty is valid; the link is then created without
+	// per-symbol cookie values.
+	Cookies []uint64
+}
+
+// KprobeMulti attaches the given eBPF program to the entry point of a given set
+// of kernel symbols.
+//
+// The difference with Kprobe() is that multi-kprobe accomplishes this in a
+// single system call, making it significantly faster than attaching many
+// probes one at a time.
+//
+// Both KprobeMulti and KretprobeMulti delegate to kprobeMulti; only the
+// flags argument differs.
+//
+// Requires at least Linux 5.18.
+func KprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions) (Link, error) {
+	return kprobeMulti(prog, opts, 0)
+}
+
+// KretprobeMulti attaches the given eBPF program to the return point of a given
+// set of kernel symbols.
+//
+// The difference with Kretprobe() is that multi-kprobe accomplishes this in a
+// single system call, making it significantly faster than attaching many
+// probes one at a time.
+//
+// Requires at least Linux 5.18.
+func KretprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions) (Link, error) {
+	return kprobeMulti(prog, opts, unix.BPF_F_KPROBE_MULTI_RETURN)
+}
+
+// kprobeMulti validates opts, verifies kernel support and issues the
+// BPF_LINK_CREATE syscall for a kprobe-multi link. flags may carry
+// unix.BPF_F_KPROBE_MULTI_RETURN to request return-probe semantics.
+func kprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions, flags uint32) (Link, error) {
+	if prog == nil {
+		return nil, errors.New("cannot attach a nil program")
+	}
+
+	syms := uint32(len(opts.Symbols))
+	addrs := uint32(len(opts.Addresses))
+	cookies := uint32(len(opts.Cookies))
+
+	if syms == 0 && addrs == 0 {
+		return nil, fmt.Errorf("one of Symbols or Addresses is required: %w", errInvalidInput)
+	}
+	if syms != 0 && addrs != 0 {
+		return nil, fmt.Errorf("Symbols and Addresses are mutually exclusive: %w", errInvalidInput)
+	}
+	if cookies > 0 && cookies != syms && cookies != addrs {
+		return nil, fmt.Errorf("Cookies must be exactly Symbols or Addresses in length: %w", errInvalidInput)
+	}
+
+	if err := haveBPFLinkKprobeMulti(); err != nil {
+		return nil, err
+	}
+
+	attr := &sys.LinkCreateKprobeMultiAttr{
+		ProgFd:           uint32(prog.FD()),
+		AttachType:       sys.BPF_TRACE_KPROBE_MULTI,
+		KprobeMultiFlags: flags,
+	}
+
+	// Exactly one of the two branches runs; the earlier checks guarantee
+	// the slices are non-empty before their first element is referenced.
+	switch {
+	case syms != 0:
+		attr.Count = syms
+		attr.Syms = sys.NewStringSlicePointer(opts.Symbols)
+
+	case addrs != 0:
+		attr.Count = addrs
+		attr.Addrs = sys.NewPointer(unsafe.Pointer(&opts.Addresses[0]))
+	}
+
+	if cookies != 0 {
+		attr.Cookies = sys.NewPointer(unsafe.Pointer(&opts.Cookies[0]))
+	}
+
+	// Map the most common kernel errnos onto friendlier errors.
+	fd, err := sys.LinkCreateKprobeMulti(attr)
+	if errors.Is(err, unix.ESRCH) {
+		return nil, fmt.Errorf("couldn't find one or more symbols: %w", os.ErrNotExist)
+	}
+	if errors.Is(err, unix.EINVAL) {
+		return nil, fmt.Errorf("%w (missing kernel symbol or prog's AttachType not AttachTraceKprobeMulti?)", err)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	// Empty pinnedPath: the freshly created link is not pinned.
+	return &kprobeMultiLink{RawLink{fd, ""}}, nil
+}
+
+// kprobeMultiLink is the Link implementation backing KprobeMulti and
+// KretprobeMulti. Update/Pin/Unpin are explicitly unsupported.
+type kprobeMultiLink struct {
+	RawLink
+}
+
+// Compile-time check that kprobeMultiLink satisfies Link.
+var _ Link = (*kprobeMultiLink)(nil)
+
+// Update is not supported for kprobe-multi links.
+func (kml *kprobeMultiLink) Update(prog *ebpf.Program) error {
+	return fmt.Errorf("update kprobe_multi: %w", ErrNotSupported)
+}
+
+// Pin is not supported for kprobe-multi links.
+func (kml *kprobeMultiLink) Pin(string) error {
+	return fmt.Errorf("pin kprobe_multi: %w", ErrNotSupported)
+}
+
+// Unpin is not supported for kprobe-multi links.
+func (kml *kprobeMultiLink) Unpin() error {
+	return fmt.Errorf("unpin kprobe_multi: %w", ErrNotSupported)
+}
+
+// haveBPFLinkKprobeMulti probes, once per process, whether the kernel can
+// create BPF_TRACE_KPROBE_MULTI links (Linux 5.18+ with CONFIG_FPROBE).
+var haveBPFLinkKprobeMulti = internal.NewFeatureTest("bpf_link_kprobe_multi", "5.18", func() error {
+	prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
+		Name: "probe_kpm_link",
+		Type: ebpf.Kprobe,
+		Instructions: asm.Instructions{
+			asm.Mov.Imm(asm.R0, 0),
+			asm.Return(),
+		},
+		AttachType: ebpf.AttachTraceKprobeMulti,
+		License:    "MIT",
+	})
+	if errors.Is(err, unix.E2BIG) {
+		// Kernel doesn't support AttachType field.
+		return internal.ErrNotSupported
+	}
+	if err != nil {
+		return err
+	}
+	defer prog.Close()
+
+	// Attempt a real link creation against "vprintk" — NOTE(review):
+	// presumably chosen as a symbol present on all supported kernels.
+	fd, err := sys.LinkCreateKprobeMulti(&sys.LinkCreateKprobeMultiAttr{
+		ProgFd:     uint32(prog.FD()),
+		AttachType: sys.BPF_TRACE_KPROBE_MULTI,
+		Count:      1,
+		Syms:       sys.NewStringSlicePointer([]string{"vprintk"}),
+	})
+	switch {
+	case errors.Is(err, unix.EINVAL):
+		return internal.ErrNotSupported
+	// If CONFIG_FPROBE isn't set.
+	case errors.Is(err, unix.EOPNOTSUPP):
+		return internal.ErrNotSupported
+	case err != nil:
+		return err
+	}
+
+	fd.Close()
+
+	return nil
+})
diff --git a/vendor/github.com/cilium/ebpf/link/link.go b/vendor/github.com/cilium/ebpf/link/link.go
new file mode 100644
index 0000000000000000000000000000000000000000..d4eeb92de0fe38c46c6c8cf9f752318da02021b0
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/link.go
@@ -0,0 +1,315 @@
+package link
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/btf"
+	"github.com/cilium/ebpf/internal"
+	"github.com/cilium/ebpf/internal/sys"
+)
+
+// ErrNotSupported is an alias of internal.ErrNotSupported so callers can use
+// errors.Is against this package without importing internal.
+var ErrNotSupported = internal.ErrNotSupported
+
+// Link represents a Program attached to a BPF hook.
+type Link interface {
+	// Replace the current program with a new program.
+	//
+	// Passing a nil program is an error. May return an error wrapping ErrNotSupported.
+	Update(*ebpf.Program) error
+
+	// Persist a link by pinning it into a bpffs.
+	//
+	// May return an error wrapping ErrNotSupported.
+	Pin(string) error
+
+	// Undo a previous call to Pin.
+	//
+	// May return an error wrapping ErrNotSupported.
+	Unpin() error
+
+	// Close frees resources.
+	//
+	// The link will be broken unless it has been successfully pinned.
+	// A link may continue past the lifetime of the process if Close is
+	// not called.
+	Close() error
+
+	// Info returns metadata on a link.
+	//
+	// May return an error wrapping ErrNotSupported.
+	Info() (*Info, error)
+
+	// Prevent external users from implementing this interface.
+	isLink()
+}
+
+// LoadPinnedLink loads a link that was persisted into a bpffs.
+func LoadPinnedLink(fileName string, opts *ebpf.LoadPinOptions) (Link, error) {
+	raw, err := loadPinnedRawLink(fileName, opts)
+	if err != nil {
+		return nil, err
+	}
+
+	return wrapRawLink(raw)
+}
+
+// wrap a RawLink in a more specific type if possible.
+//
+// The function takes ownership of raw and closes it on error.
+func wrapRawLink(raw *RawLink) (Link, error) {
+	info, err := raw.Info()
+	if err != nil {
+		raw.Close()
+		return nil, err
+	}
+
+	switch info.Type {
+	case RawTracepointType:
+		return &rawTracepoint{*raw}, nil
+	case TracingType:
+		return &tracing{*raw}, nil
+	case CgroupType:
+		return &linkCgroup{*raw}, nil
+	case IterType:
+		return &Iter{*raw}, nil
+	case NetNsType:
+		return &NetNsLink{*raw}, nil
+	default:
+		// Unknown or generic link types are returned as the RawLink itself.
+		return raw, nil
+	}
+}
+
+// ID uniquely identifies a BPF link.
+type ID = sys.LinkID
+
+// RawLinkOptions control the creation of a raw link.
+type RawLinkOptions struct {
+	// File descriptor to attach to. This differs for each attach type.
+	Target int
+	// Program to attach.
+	Program *ebpf.Program
+	// Attach must match the attach type of Program.
+	Attach ebpf.AttachType
+	// BTF is the BTF of the attachment target.
+	BTF btf.TypeID
+	// Flags control the attach behaviour.
+	Flags uint32
+}
+
+// Info contains metadata on a link.
+type Info struct {
+	Type    Type
+	ID      ID
+	Program ebpf.ProgramID
+	// extra holds the type-specific info struct (one of the aliases below),
+	// or nil when the link type carries no extra metadata.
+	extra interface{}
+}
+
+type TracingInfo sys.TracingLinkInfo
+type CgroupInfo sys.CgroupLinkInfo
+type NetNsInfo sys.NetNsLinkInfo
+type XDPInfo sys.XDPLinkInfo
+
+// Tracing returns tracing type-specific link info.
+//
+// Returns nil if the type-specific link info isn't available.
+func (r Info) Tracing() *TracingInfo {
+	e, _ := r.extra.(*TracingInfo)
+	return e
+}
+
+// Cgroup returns cgroup type-specific link info.
+//
+// Returns nil if the type-specific link info isn't available.
+func (r Info) Cgroup() *CgroupInfo {
+	e, _ := r.extra.(*CgroupInfo)
+	return e
+}
+
+// NetNs returns netns type-specific link info.
+//
+// Returns nil if the type-specific link info isn't available.
+func (r Info) NetNs() *NetNsInfo {
+	e, _ := r.extra.(*NetNsInfo)
+	return e
+}
+
+// XDP returns XDP type-specific link info.
+//
+// Returns nil if the type-specific link info isn't available.
+func (r Info) XDP() *XDPInfo {
+	e, _ := r.extra.(*XDPInfo)
+	return e
+}
+
+// RawLink is the low-level API to bpf_link.
+//
+// You should consider using the higher level interfaces in this
+// package instead.
+type RawLink struct {
+	fd *sys.FD
+	// pinnedPath is the bpffs path this link is pinned at, or "" if unpinned.
+	pinnedPath string
+}
+
+// AttachRawLink creates a raw link.
+func AttachRawLink(opts RawLinkOptions) (*RawLink, error) {
+	if err := haveBPFLink(); err != nil {
+		return nil, err
+	}
+
+	if opts.Target < 0 {
+		return nil, fmt.Errorf("invalid target: %s", sys.ErrClosedFd)
+	}
+
+	progFd := opts.Program.FD()
+	if progFd < 0 {
+		return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd)
+	}
+
+	attr := sys.LinkCreateAttr{
+		TargetFd:    uint32(opts.Target),
+		ProgFd:      uint32(progFd),
+		AttachType:  sys.AttachType(opts.Attach),
+		TargetBtfId: uint32(opts.BTF),
+		Flags:       opts.Flags,
+	}
+	fd, err := sys.LinkCreate(&attr)
+	if err != nil {
+		return nil, fmt.Errorf("create link: %w", err)
+	}
+
+	return &RawLink{fd, ""}, nil
+}
+
+// loadPinnedRawLink opens the bpffs pin at fileName and wraps the resulting
+// fd in a RawLink that remembers its pinned path.
+func loadPinnedRawLink(fileName string, opts *ebpf.LoadPinOptions) (*RawLink, error) {
+	fd, err := sys.ObjGet(&sys.ObjGetAttr{
+		Pathname:  sys.NewStringPointer(fileName),
+		FileFlags: opts.Marshal(),
+	})
+	if err != nil {
+		return nil, fmt.Errorf("load pinned link: %w", err)
+	}
+
+	return &RawLink{fd, fileName}, nil
+}
+
+// isLink marks RawLink as an implementation of the Link interface.
+func (l *RawLink) isLink() {}
+
+// FD returns the raw file descriptor.
+func (l *RawLink) FD() int {
+	return l.fd.Int()
+}
+
+// Close breaks the link.
+//
+// Use Pin if you want to make the link persistent.
+func (l *RawLink) Close() error {
+	return l.fd.Close()
+}
+
+// Pin persists a link past the lifetime of the process.
+//
+// Calling Close on a pinned Link will not break the link
+// until the pin is removed.
+func (l *RawLink) Pin(fileName string) error {
+	if err := internal.Pin(l.pinnedPath, fileName, l.fd); err != nil {
+		return err
+	}
+	l.pinnedPath = fileName
+	return nil
+}
+
+// Unpin implements the Link interface.
+func (l *RawLink) Unpin() error {
+	if err := internal.Unpin(l.pinnedPath); err != nil {
+		return err
+	}
+	l.pinnedPath = ""
+	return nil
+}
+
+// IsPinned returns true if the Link has a non-empty pinned path.
+func (l *RawLink) IsPinned() bool {
+	return l.pinnedPath != ""
+}
+
+// Update implements the Link interface.
+func (l *RawLink) Update(new *ebpf.Program) error {
+	return l.UpdateArgs(RawLinkUpdateOptions{
+		New: new,
+	})
+}
+
+// RawLinkUpdateOptions control the behaviour of RawLink.UpdateArgs.
+type RawLinkUpdateOptions struct {
+	// New is the program to install; required.
+	New *ebpf.Program
+	// Old, if non-nil, is the program expected to currently be attached.
+	Old *ebpf.Program
+	// Flags are passed through to the LINK_UPDATE syscall.
+	Flags uint32
+}
+
+// UpdateArgs updates a link based on args.
+func (l *RawLink) UpdateArgs(opts RawLinkUpdateOptions) error {
+	newFd := opts.New.FD()
+	if newFd < 0 {
+		return fmt.Errorf("invalid program: %s", sys.ErrClosedFd)
+	}
+
+	// oldFd stays 0 when no replacement-check program was provided.
+	var oldFd int
+	if opts.Old != nil {
+		oldFd = opts.Old.FD()
+		if oldFd < 0 {
+			return fmt.Errorf("invalid replacement program: %s", sys.ErrClosedFd)
+		}
+	}
+
+	attr := sys.LinkUpdateAttr{
+		LinkFd:    l.fd.Uint(),
+		NewProgFd: uint32(newFd),
+		OldProgFd: uint32(oldFd),
+		Flags:     opts.Flags,
+	}
+	return sys.LinkUpdate(&attr)
+}
+
+// Info returns metadata about the link.
+func (l *RawLink) Info() (*Info, error) {
+	var info sys.LinkInfo
+
+	if err := sys.ObjInfo(l.fd, &info); err != nil {
+		return nil, fmt.Errorf("link info: %s", err)
+	}
+
+	var extra interface{}
+	switch info.Type {
+	case CgroupType:
+		extra = &CgroupInfo{}
+	case NetNsType:
+		extra = &NetNsInfo{}
+	case TracingType:
+		extra = &TracingInfo{}
+	case XDPType:
+		extra = &XDPInfo{}
+	case RawTracepointType, IterType,
+		PerfEventType, KprobeMultiType:
+		// Extra metadata not supported.
+	default:
+		return nil, fmt.Errorf("unknown link info type: %d", info.Type)
+	}
+
+	// Decode the type-specific payload from the raw Extra bytes in
+	// native endianness.
+	if extra != nil {
+		buf := bytes.NewReader(info.Extra[:])
+		err := binary.Read(buf, internal.NativeEndian, extra)
+		if err != nil {
+			return nil, fmt.Errorf("cannot read extra link info: %w", err)
+		}
+	}
+
+	return &Info{
+		info.Type,
+		info.Id,
+		ebpf.ProgramID(info.ProgId),
+		extra,
+	}, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/netns.go b/vendor/github.com/cilium/ebpf/link/netns.go
new file mode 100644
index 0000000000000000000000000000000000000000..344ecced6beab5b933ec1b18205a15588c28c5d9
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/netns.go
@@ -0,0 +1,36 @@
+package link
+
+import (
+	"fmt"
+
+	"github.com/cilium/ebpf"
+)
+
+// NetNsLink is a program attached to a network namespace.
+type NetNsLink struct {
+	RawLink
+}
+
+// AttachNetNs attaches a program to a network namespace.
+//
+// Only FlowDissector and SkLookup program types are accepted; any other
+// type is rejected with an error. ns is the fd of the network namespace.
+func AttachNetNs(ns int, prog *ebpf.Program) (*NetNsLink, error) {
+	// Derive the attach type from the program type.
+	var attach ebpf.AttachType
+	switch t := prog.Type(); t {
+	case ebpf.FlowDissector:
+		attach = ebpf.AttachFlowDissector
+	case ebpf.SkLookup:
+		attach = ebpf.AttachSkLookup
+	default:
+		return nil, fmt.Errorf("can't attach %v to network namespace", t)
+	}
+
+	link, err := AttachRawLink(RawLinkOptions{
+		Target:  ns,
+		Program: prog,
+		Attach:  attach,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return &NetNsLink{*link}, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/perf_event.go b/vendor/github.com/cilium/ebpf/link/perf_event.go
new file mode 100644
index 0000000000000000000000000000000000000000..61f80627a019ad7a6d032606479aafb41c2b9999
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/perf_event.go
@@ -0,0 +1,434 @@
+package link
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"sync"
+	"unsafe"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/asm"
+	"github.com/cilium/ebpf/internal"
+	"github.com/cilium/ebpf/internal/sys"
+	"github.com/cilium/ebpf/internal/unix"
+)
+
+// Getting the terminology right is usually the hardest part. For posterity and
+// for staying sane during implementation:
+//
+// - trace event: Representation of a kernel runtime hook. Filesystem entries
+//   under <tracefs>/events. Can be tracepoints (static), kprobes or uprobes.
+//   Can be instantiated into perf events (see below).
+// - tracepoint: A predetermined hook point in the kernel. Exposed as trace
+//   events in (sub)directories under <tracefs>/events. Cannot be closed or
+//   removed, they are static.
+// - k(ret)probe: Ephemeral trace events based on entry or exit points of
+//   exported kernel symbols. kprobe-based (tracefs) trace events can be
+//   created system-wide by writing to the <tracefs>/kprobe_events file, or
+//   they can be scoped to the current process by creating PMU perf events.
+// - u(ret)probe: Ephemeral trace events based on user-provided ELF binaries
+//   and offsets. uprobe-based (tracefs) trace events can be
+//   created system-wide by writing to the <tracefs>/uprobe_events file, or
+//   they can be scoped to the current process by creating PMU perf events.
+// - perf event: An object instantiated based on an existing trace event or
+//   kernel symbol. Referred to by fd in userspace.
+//   Exactly one eBPF program can be attached to a perf event. Multiple perf
+//   events can be created from a single trace event. Closing a perf event
+//   stops any further invocations of the attached eBPF program.
+
+var (
+	// tracefsPath is the mount point used to reach trace events.
+	tracefsPath = "/sys/kernel/debug/tracing"
+
+	errInvalidInput = errors.New("invalid input")
+)
+
+const (
+	// perfAllThreads requests a perf event that observes all threads
+	// (pid -1 in perf_event_open terms).
+	perfAllThreads = -1
+)
+
+// perfEventType distinguishes the kinds of trace events a perfEvent can wrap.
+type perfEventType uint8
+
+const (
+	tracepointEvent perfEventType = iota
+	kprobeEvent
+	kretprobeEvent
+	uprobeEvent
+	uretprobeEvent
+)
+
+// A perfEvent represents a perf event kernel object. Exactly one eBPF program
+// can be attached to it. It is created based on a tracefs trace event or a
+// Performance Monitoring Unit (PMU).
+type perfEvent struct {
+	// The event type determines the types of programs that can be attached.
+	typ perfEventType
+
+	// Group and name of the tracepoint/kprobe/uprobe.
+	group string
+	name  string
+
+	// PMU event ID read from sysfs. Valid IDs are non-zero.
+	pmuID uint64
+	// ID of the trace event read from tracefs. Valid IDs are non-zero.
+	tracefsID uint64
+
+	// User provided arbitrary value.
+	cookie uint64
+
+	// This is the perf event FD. Closed by perfEvent.Close.
+	fd *sys.FD
+}
+
+// Close releases the perf event fd and, for probe-based events that created
+// a tracefs entry (tracefsID != 0), removes that entry as well. A failure to
+// close the fd is returned immediately, before any tracefs cleanup.
+func (pe *perfEvent) Close() error {
+	if err := pe.fd.Close(); err != nil {
+		return fmt.Errorf("closing perf event fd: %w", err)
+	}
+
+	switch pe.typ {
+	case kprobeEvent, kretprobeEvent:
+		// Clean up kprobe tracefs entry.
+		if pe.tracefsID != 0 {
+			return closeTraceFSProbeEvent(kprobeType, pe.group, pe.name)
+		}
+	case uprobeEvent, uretprobeEvent:
+		// Clean up uprobe tracefs entry.
+		if pe.tracefsID != 0 {
+			return closeTraceFSProbeEvent(uprobeType, pe.group, pe.name)
+		}
+	case tracepointEvent:
+		// Tracepoint trace events don't hold any extra resources.
+		return nil
+	}
+
+	return nil
+}
+
+// perfEventLink represents a bpf perf link.
+type perfEventLink struct {
+	RawLink
+	// pe is the underlying perf event; it must outlive the link (see Pin).
+	pe *perfEvent
+}
+
+// isLink marks perfEventLink as an implementation of the Link interface.
+func (pl *perfEventLink) isLink() {}
+
+// Pinning requires the underlying perf event FD to stay open.
+//
+// | PerfEvent FD | BpfLink FD | Works |
+// |--------------|------------|-------|
+// | Open         | Open       | Yes   |
+// | Closed       | Open       | No    |
+// | Open         | Closed     | No (Pin() -> EINVAL) |
+// | Closed       | Closed     | No (Pin() -> EINVAL) |
+//
+// There is currently no pretty way to recover the perf event FD
+// when loading a pinned link, so leave as not supported for now.
+func (pl *perfEventLink) Pin(string) error {
+	return fmt.Errorf("perf event link pin: %w", ErrNotSupported)
+}
+
+// Unpin is not supported for perf event links; see Pin.
+func (pl *perfEventLink) Unpin() error {
+	return fmt.Errorf("perf event link unpin: %w", ErrNotSupported)
+}
+
+// Close releases the perf event first, then the bpf link fd.
+func (pl *perfEventLink) Close() error {
+	if err := pl.pe.Close(); err != nil {
+		return fmt.Errorf("perf event link close: %w", err)
+	}
+	return pl.fd.Close()
+}
+
+// Update is not supported for perf event links.
+func (pl *perfEventLink) Update(prog *ebpf.Program) error {
+	return fmt.Errorf("perf event link update: %w", ErrNotSupported)
+}
+
+// perfEventIoctl implements Link and handles the perf event lifecycle
+// via ioctl().
+type perfEventIoctl struct {
+	*perfEvent
+}
+
+// isLink marks perfEventIoctl as an implementation of the Link interface.
+func (pi *perfEventIoctl) isLink() {}
+
+// Since 4.15 (e87c6bc3852b "bpf: permit multiple bpf attachments for a single perf event"),
+// calling PERF_EVENT_IOC_SET_BPF appends the given program to a prog_array
+// owned by the perf event, which means multiple programs can be attached
+// simultaneously.
+//
+// Before 4.15, calling PERF_EVENT_IOC_SET_BPF more than once on a perf event
+// returns EEXIST.
+//
+// Detaching a program from a perf event is currently not possible, so a
+// program replacement mechanism cannot be implemented for perf events.
+func (pi *perfEventIoctl) Update(prog *ebpf.Program) error {
+	return fmt.Errorf("perf event ioctl update: %w", ErrNotSupported)
+}
+
+// Pin is not supported for ioctl-based perf event attachments.
+func (pi *perfEventIoctl) Pin(string) error {
+	return fmt.Errorf("perf event ioctl pin: %w", ErrNotSupported)
+}
+
+// Unpin is not supported for ioctl-based perf event attachments.
+func (pi *perfEventIoctl) Unpin() error {
+	return fmt.Errorf("perf event ioctl unpin: %w", ErrNotSupported)
+}
+
+// Info is not supported for ioctl-based perf event attachments.
+func (pi *perfEventIoctl) Info() (*Info, error) {
+	return nil, fmt.Errorf("perf event ioctl info: %w", ErrNotSupported)
+}
+
+// attach the given eBPF prog to the perf event stored in pe.
+// pe must contain a valid perf event fd.
+// prog's type must match the program type stored in pe.
+//
+// Prefers the bpf_link API (kernel 5.15+) and falls back to the legacy
+// PERF_EVENT_IOC_SET_BPF ioctl on older kernels.
+func attachPerfEvent(pe *perfEvent, prog *ebpf.Program) (Link, error) {
+	if prog == nil {
+		return nil, errors.New("cannot attach a nil program")
+	}
+	if prog.FD() < 0 {
+		return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd)
+	}
+
+	// Validate the program type against the perf event kind: all [k,u](ret)probes
+	// take Kprobe programs, tracepoints take TracePoint programs.
+	switch pe.typ {
+	case kprobeEvent, kretprobeEvent, uprobeEvent, uretprobeEvent:
+		if t := prog.Type(); t != ebpf.Kprobe {
+			return nil, fmt.Errorf("invalid program type (expected %s): %s", ebpf.Kprobe, t)
+		}
+	case tracepointEvent:
+		if t := prog.Type(); t != ebpf.TracePoint {
+			return nil, fmt.Errorf("invalid program type (expected %s): %s", ebpf.TracePoint, t)
+		}
+	default:
+		return nil, fmt.Errorf("unknown perf event type: %d", pe.typ)
+	}
+
+	if err := haveBPFLinkPerfEvent(); err == nil {
+		return attachPerfEventLink(pe, prog)
+	}
+	return attachPerfEventIoctl(pe, prog)
+}
+
+// attachPerfEventIoctl attaches prog to pe using the legacy
+// PERF_EVENT_IOC_SET_BPF ioctl. Cookies require bpf_link support and are
+// therefore rejected here.
+func attachPerfEventIoctl(pe *perfEvent, prog *ebpf.Program) (*perfEventIoctl, error) {
+	if pe.cookie != 0 {
+		return nil, fmt.Errorf("cookies are not supported: %w", ErrNotSupported)
+	}
+
+	// Assign the eBPF program to the perf event.
+	err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_SET_BPF, prog.FD())
+	if err != nil {
+		return nil, fmt.Errorf("setting perf event bpf program: %w", err)
+	}
+
+	// PERF_EVENT_IOC_ENABLE and _DISABLE ignore their given values.
+	// NOTE(review): %s (not %w) means callers cannot errors.Is/As this error —
+	// confirm against upstream before changing.
+	if err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_ENABLE, 0); err != nil {
+		return nil, fmt.Errorf("enable perf event: %s", err)
+	}
+
+	pi := &perfEventIoctl{pe}
+
+	// Close the perf event when its reference is lost to avoid leaking system resources.
+	runtime.SetFinalizer(pi, (*perfEventIoctl).Close)
+	return pi, nil
+}
+
+// Use the bpf api to attach the perf event (BPF_LINK_TYPE_PERF_EVENT, 5.15+).
+//
+// https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e
+func attachPerfEventLink(pe *perfEvent, prog *ebpf.Program) (*perfEventLink, error) {
+	fd, err := sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{
+		ProgFd:     uint32(prog.FD()),
+		TargetFd:   pe.fd.Uint(),
+		AttachType: sys.BPF_PERF_EVENT,
+		BpfCookie:  pe.cookie,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("cannot create bpf perf link: %v", err)
+	}
+
+	pl := &perfEventLink{RawLink{fd: fd}, pe}
+
+	// Close the perf event when its reference is lost to avoid leaking system resources.
+	runtime.SetFinalizer(pl, (*perfEventLink).Close)
+	return pl, nil
+}
+
+// unsafeStringPtr returns an unsafe.Pointer to a NUL-terminated copy of str.
+// Fails if str contains an interior NUL byte (per unix.BytePtrFromString).
+func unsafeStringPtr(str string) (unsafe.Pointer, error) {
+	p, err := unix.BytePtrFromString(str)
+	if err != nil {
+		return nil, err
+	}
+	return unsafe.Pointer(p), nil
+}
+
+// getTraceEventID reads a trace event's ID from tracefs given its group and name.
+// The kernel requires group and name to be alphanumeric or underscore.
+//
+// name automatically has its invalid symbols converted to underscores so the caller
+// can pass a raw symbol name, e.g. a kernel symbol containing dots.
+func getTraceEventID(group, name string) (uint64, error) {
+	name = sanitizeSymbol(name)
+	path, err := sanitizePath(tracefsPath, "events", group, name, "id")
+	if err != nil {
+		return 0, err
+	}
+	tid, err := readUint64FromFile("%d\n", path)
+	if errors.Is(err, os.ErrNotExist) {
+		// Pass os.ErrNotExist through unwrapped so callers can detect a
+		// missing trace event distinctly from a parse failure.
+		return 0, err
+	}
+	if err != nil {
+		return 0, fmt.Errorf("reading trace event ID of %s/%s: %w", group, name, err)
+	}
+
+	return tid, nil
+}
+
+// openTracepointPerfEvent opens a tracepoint-type perf event. System-wide
+// [k,u]probes created by writing to <tracefs>/[k,u]probe_events are tracepoints
+// behind the scenes, and can be attached to using these perf events.
+//
+// tid is the trace event ID from tracefs (see getTraceEventID); pid selects
+// the target process, or perfAllThreads for system-wide tracing.
+func openTracepointPerfEvent(tid uint64, pid int) (*sys.FD, error) {
+	attr := unix.PerfEventAttr{
+		Type:        unix.PERF_TYPE_TRACEPOINT,
+		Config:      tid,
+		Sample_type: unix.PERF_SAMPLE_RAW,
+		Sample:      1,
+		Wakeup:      1,
+	}
+
+	// CLOEXEC prevents the fd from leaking into child processes.
+	fd, err := unix.PerfEventOpen(&attr, pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC)
+	if err != nil {
+		return nil, fmt.Errorf("opening tracepoint perf event: %w", err)
+	}
+
+	return sys.NewFD(fd)
+}
+
+// sanitizePath joins path elements onto base and rejects results that escape
+// base (e.g. via ".." components), guarding against path traversal.
+func sanitizePath(base string, path ...string) (string, error) {
+	l := filepath.Join(path...)
+	p := filepath.Join(base, l)
+	if !strings.HasPrefix(p, base) {
+		return "", fmt.Errorf("path '%s' attempts to escape base path '%s': %w", l, base, errInvalidInput)
+	}
+	return p, nil
+}
+
+// readUint64FromFile reads a uint64 from a file.
+//
+// format specifies the contents of the file in fmt.Scanf syntax.
+// The path elements are joined with filepath.Join before reading.
+func readUint64FromFile(format string, path ...string) (uint64, error) {
+	filename := filepath.Join(path...)
+	data, err := os.ReadFile(filename)
+	if err != nil {
+		return 0, fmt.Errorf("reading file %q: %w", filename, err)
+	}
+
+	var value uint64
+	n, err := fmt.Fscanf(bytes.NewReader(data), format, &value)
+	if err != nil {
+		return 0, fmt.Errorf("parsing file %q: %w", filename, err)
+	}
+	if n != 1 {
+		// Fscanf matched the format but filled fewer than one value.
+		return 0, fmt.Errorf("parsing file %q: expected 1 item, got %d", filename, n)
+	}
+
+	return value, nil
+}
+
+// uint64FromFileKey keys the memoization cache by both the scan format and
+// the resolved file path.
+type uint64FromFileKey struct {
+	format, path string
+}
+
+// uint64FromFileCache memoizes readUint64FromFile results, guarded by an
+// embedded RWMutex for concurrent readers.
+var uint64FromFileCache = struct {
+	sync.RWMutex
+	values map[uint64FromFileKey]uint64
+}{
+	values: map[uint64FromFileKey]uint64{},
+}
+
+// readUint64FromFileOnce is like readUint64FromFile but memoizes the result.
+// Only successful reads are cached; errors are returned without caching.
+func readUint64FromFileOnce(format string, path ...string) (uint64, error) {
+	filename := filepath.Join(path...)
+	key := uint64FromFileKey{format, filename}
+
+	// Fast path: read-lock and check the cache.
+	uint64FromFileCache.RLock()
+	if value, ok := uint64FromFileCache.values[key]; ok {
+		uint64FromFileCache.RUnlock()
+		return value, nil
+	}
+	uint64FromFileCache.RUnlock()
+
+	// Slow path: read the file outside the lock, then re-check under the
+	// write lock (double-checked locking) before inserting.
+	value, err := readUint64FromFile(format, filename)
+	if err != nil {
+		return 0, err
+	}
+
+	uint64FromFileCache.Lock()
+	defer uint64FromFileCache.Unlock()
+
+	if value, ok := uint64FromFileCache.values[key]; ok {
+		// Someone else got here before us, use what is cached.
+		return value, nil
+	}
+
+	uint64FromFileCache.values[key] = value
+	return value, nil
+}
+
+// Probe BPF perf link.
+//
+// Loads a minimal kprobe program and issues LINK_CREATE with no target fd:
+// EINVAL means the kernel does not know BPF_PERF_EVENT (unsupported), EBADF
+// means the attach type was accepted and only the fd was rejected (supported).
+//
+// https://elixir.bootlin.com/linux/v5.16.8/source/kernel/bpf/syscall.c#L4307
+// https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e
+var haveBPFLinkPerfEvent = internal.NewFeatureTest("bpf_link_perf_event", "5.15", func() error {
+	prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
+		Name: "probe_bpf_perf_link",
+		Type: ebpf.Kprobe,
+		Instructions: asm.Instructions{
+			asm.Mov.Imm(asm.R0, 0),
+			asm.Return(),
+		},
+		License: "MIT",
+	})
+	if err != nil {
+		return err
+	}
+	defer prog.Close()
+
+	_, err = sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{
+		ProgFd:     uint32(prog.FD()),
+		AttachType: sys.BPF_PERF_EVENT,
+	})
+	if errors.Is(err, unix.EINVAL) {
+		return internal.ErrNotSupported
+	}
+	if errors.Is(err, unix.EBADF) {
+		return nil
+	}
+	return err
+})
+
+// isValidTraceID implements the equivalent of a regex match
+// against "^[a-zA-Z_][0-9a-zA-Z_]*$".
+//
+// Trace event groups, names and kernel symbols must adhere to this set
+// of characters. Non-empty, first character must not be a number, all
+// characters must be alphanumeric or underscore.
+func isValidTraceID(s string) bool {
+	if len(s) < 1 {
+		return false
+	}
+	// Iterate over raw bytes: valid identifiers are ASCII-only, so any
+	// multi-byte UTF-8 sequence falls through to the default case.
+	for i, c := range []byte(s) {
+		switch {
+		case c >= 'a' && c <= 'z':
+		case c >= 'A' && c <= 'Z':
+		case c == '_':
+		// Digits are only allowed after the first character.
+		case i > 0 && c >= '0' && c <= '9':
+
+		default:
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/vendor/github.com/cilium/ebpf/link/platform.go b/vendor/github.com/cilium/ebpf/link/platform.go
new file mode 100644
index 0000000000000000000000000000000000000000..eb6f7b7a376905b9f548cbd5ada2c2dd61a08d9a
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/platform.go
@@ -0,0 +1,25 @@
+package link
+
+import (
+	"fmt"
+	"runtime"
+)
+
+// platformPrefix prepends the architecture-specific syscall prefix
+// (e.g. "__x64_") to symbol. Architectures without a known mapping
+// return symbol unchanged.
+func platformPrefix(symbol string) string {
+
+	prefix := runtime.GOARCH
+
+	// per https://github.com/golang/go/blob/master/src/go/build/syslist.go
+	switch prefix {
+	case "386":
+		prefix = "ia32"
+	case "amd64", "amd64p32":
+		prefix = "x64"
+	case "arm64", "arm64be":
+		prefix = "arm64"
+	default:
+		return symbol
+	}
+
+	return fmt.Sprintf("__%s_%s", prefix, symbol)
+}
diff --git a/vendor/github.com/cilium/ebpf/link/program.go b/vendor/github.com/cilium/ebpf/link/program.go
new file mode 100644
index 0000000000000000000000000000000000000000..ea31817377fc3af274c97a66f0e5b3e0537210a2
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/program.go
@@ -0,0 +1,76 @@
+package link
+
+import (
+	"fmt"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/internal/sys"
+)
+
+// RawAttachProgramOptions are the parameters for RawAttachProgram.
+type RawAttachProgramOptions struct {
+	// File descriptor to attach to. This differs for each attach type.
+	Target int
+	// Program to attach.
+	Program *ebpf.Program
+	// Program to replace (cgroups).
+	Replace *ebpf.Program
+	// Attach must match the attach type of Program (and Replace).
+	Attach ebpf.AttachType
+	// Flags control the attach behaviour. This differs for each attach type.
+	Flags uint32
+}
+
+// RawAttachProgram is a low level wrapper around BPF_PROG_ATTACH.
+//
+// You should use one of the higher level abstractions available in this
+// package if possible.
+func RawAttachProgram(opts RawAttachProgramOptions) error {
+	// Fail early on kernels without BPF_PROG_ATTACH (pre-4.10).
+	if err := haveProgAttach(); err != nil {
+		return err
+	}
+
+	// ReplaceBpfFd of zero means "no replacement" to the kernel.
+	var replaceFd uint32
+	if opts.Replace != nil {
+		replaceFd = uint32(opts.Replace.FD())
+	}
+
+	attr := sys.ProgAttachAttr{
+		TargetFd:     uint32(opts.Target),
+		AttachBpfFd:  uint32(opts.Program.FD()),
+		ReplaceBpfFd: replaceFd,
+		AttachType:   uint32(opts.Attach),
+		AttachFlags:  uint32(opts.Flags),
+	}
+
+	if err := sys.ProgAttach(&attr); err != nil {
+		return fmt.Errorf("can't attach program: %w", err)
+	}
+	return nil
+}
+
+// RawDetachProgramOptions are the parameters for RawDetachProgram; they
+// mirror the attach-side fields of RawAttachProgramOptions.
+type RawDetachProgramOptions struct {
+	Target  int
+	Program *ebpf.Program
+	Attach  ebpf.AttachType
+}
+
+// RawDetachProgram is a low level wrapper around BPF_PROG_DETACH.
+//
+// You should use one of the higher level abstractions available in this
+// package if possible.
+func RawDetachProgram(opts RawDetachProgramOptions) error {
+	// BPF_PROG_DETACH was introduced together with BPF_PROG_ATTACH,
+	// so the same feature probe covers both.
+	if err := haveProgAttach(); err != nil {
+		return err
+	}
+
+	attr := sys.ProgDetachAttr{
+		TargetFd:    uint32(opts.Target),
+		AttachBpfFd: uint32(opts.Program.FD()),
+		AttachType:  uint32(opts.Attach),
+	}
+	if err := sys.ProgDetach(&attr); err != nil {
+		return fmt.Errorf("can't detach program: %w", err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/query.go b/vendor/github.com/cilium/ebpf/link/query.go
new file mode 100644
index 0000000000000000000000000000000000000000..8c882414d20124419a67c9be39068994856c350c
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/query.go
@@ -0,0 +1,63 @@
+package link
+
+import (
+	"fmt"
+	"os"
+	"unsafe"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/internal/sys"
+)
+
+// QueryOptions defines additional parameters when querying for programs.
+type QueryOptions struct {
+	// Path can be a path to a cgroup, netns or LIRC2 device
+	Path string
+	// Attach specifies the AttachType of the programs queried for
+	Attach ebpf.AttachType
+	// QueryFlags are flags for BPF_PROG_QUERY, e.g. BPF_F_QUERY_EFFECTIVE
+	QueryFlags uint32
+}
+
+// QueryPrograms retrieves ProgramIDs associated with the AttachType.
+//
+// It only returns IDs of programs that were attached using PROG_ATTACH and not bpf_link.
+// Returns (nil, nil) if there are no programs attached to the queried kernel resource.
+// Calling QueryPrograms on a kernel missing PROG_QUERY will result in ErrNotSupported.
+func QueryPrograms(opts QueryOptions) ([]ebpf.ProgramID, error) {
+	if haveProgQuery() != nil {
+		return nil, fmt.Errorf("can't query program IDs: %w", ErrNotSupported)
+	}
+
+	f, err := os.Open(opts.Path)
+	if err != nil {
+		return nil, fmt.Errorf("can't open file: %s", err)
+	}
+	defer f.Close()
+
+	// query the number of programs to allocate correct slice size
+	attr := sys.ProgQueryAttr{
+		TargetFd:   uint32(f.Fd()),
+		AttachType: sys.AttachType(opts.Attach),
+		QueryFlags: opts.QueryFlags,
+	}
+	// With ProgIds unset, the kernel only fills in ProgCount.
+	if err := sys.ProgQuery(&attr); err != nil {
+		return nil, fmt.Errorf("can't query program count: %w", err)
+	}
+
+	// return nil if no progs are attached
+	if attr.ProgCount == 0 {
+		return nil, nil
+	}
+
+	// we have at least one prog, so we query again
+	// NOTE(review): the count can change between the two syscalls (TOCTOU);
+	// the kernel truncates to the provided ProgCount in that case.
+	progIds := make([]ebpf.ProgramID, attr.ProgCount)
+	attr.ProgIds = sys.NewPointer(unsafe.Pointer(&progIds[0]))
+	attr.ProgCount = uint32(len(progIds))
+	if err := sys.ProgQuery(&attr); err != nil {
+		return nil, fmt.Errorf("can't query program IDs: %w", err)
+	}
+
+	return progIds, nil
+
+}
diff --git a/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go b/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go
new file mode 100644
index 0000000000000000000000000000000000000000..925e621cbbc7d32da6c8b37ed115a2952ed7aa31
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go
@@ -0,0 +1,87 @@
+package link
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/internal/sys"
+)
+
+// RawTracepointOptions are the parameters for AttachRawTracepoint.
+type RawTracepointOptions struct {
+	// Tracepoint name.
+	Name string
+	// Program must be of type RawTracepoint*
+	Program *ebpf.Program
+}
+
+// AttachRawTracepoint links a BPF program to a raw_tracepoint.
+//
+// Requires at least Linux 4.17.
+func AttachRawTracepoint(opts RawTracepointOptions) (Link, error) {
+	if t := opts.Program.Type(); t != ebpf.RawTracepoint && t != ebpf.RawTracepointWritable {
+		return nil, fmt.Errorf("invalid program type %s, expected RawTracepoint(Writable)", t)
+	}
+	if opts.Program.FD() < 0 {
+		return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd)
+	}
+
+	fd, err := sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{
+		Name:   sys.NewStringPointer(opts.Name),
+		ProgFd: uint32(opts.Program.FD()),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Wrap the fd according to kernel capabilities: bare fd on old
+	// kernels, RawLink where bpf_link is available.
+	err = haveBPFLink()
+	if errors.Is(err, ErrNotSupported) {
+		// Prior to commit 70ed506c3bbc ("bpf: Introduce pinnable bpf_link abstraction")
+		// raw_tracepoints are just a plain fd.
+		return &simpleRawTracepoint{fd}, nil
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return &rawTracepoint{RawLink{fd: fd}}, nil
+}
+
+// simpleRawTracepoint wraps a bare raw_tracepoint fd for kernels without
+// bpf_link support. Pin/Unpin/Update/Info are all unsupported because
+// there is no bpf_link object behind the fd.
+type simpleRawTracepoint struct {
+	fd *sys.FD
+}
+
+var _ Link = (*simpleRawTracepoint)(nil)
+
+func (frt *simpleRawTracepoint) isLink() {}
+
+// Close detaches the raw tracepoint by closing its fd.
+func (frt *simpleRawTracepoint) Close() error {
+	return frt.fd.Close()
+}
+
+func (frt *simpleRawTracepoint) Update(_ *ebpf.Program) error {
+	return fmt.Errorf("update raw_tracepoint: %w", ErrNotSupported)
+}
+
+func (frt *simpleRawTracepoint) Pin(string) error {
+	return fmt.Errorf("pin raw_tracepoint: %w", ErrNotSupported)
+}
+
+func (frt *simpleRawTracepoint) Unpin() error {
+	return fmt.Errorf("unpin raw_tracepoint: %w", ErrNotSupported)
+}
+
+func (frt *simpleRawTracepoint) Info() (*Info, error) {
+	return nil, fmt.Errorf("can't get raw_tracepoint info: %w", ErrNotSupported)
+}
+
+// rawTracepoint is the bpf_link-backed variant; it inherits Close/Pin/Unpin
+// from RawLink and only overrides Update, which the kernel does not support
+// for raw tracepoints.
+type rawTracepoint struct {
+	RawLink
+}
+
+var _ Link = (*rawTracepoint)(nil)
+
+func (rt *rawTracepoint) Update(_ *ebpf.Program) error {
+	return fmt.Errorf("update raw_tracepoint: %w", ErrNotSupported)
+}
diff --git a/vendor/github.com/cilium/ebpf/link/socket_filter.go b/vendor/github.com/cilium/ebpf/link/socket_filter.go
new file mode 100644
index 0000000000000000000000000000000000000000..94f3958cc4d83da300cea3ba11f1c1902f7a11b6
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/socket_filter.go
@@ -0,0 +1,40 @@
+package link
+
+import (
+	"syscall"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/internal/unix"
+)
+
+// AttachSocketFilter attaches a SocketFilter BPF program to a socket.
+// Uses the SO_ATTACH_BPF socket option (Linux 3.19+).
+func AttachSocketFilter(conn syscall.Conn, program *ebpf.Program) error {
+	rawConn, err := conn.SyscallConn()
+	if err != nil {
+		return err
+	}
+	// ssoErr captures the setsockopt result from inside the Control
+	// callback; err reports failure of the Control call itself.
+	var ssoErr error
+	err = rawConn.Control(func(fd uintptr) {
+		ssoErr = syscall.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_ATTACH_BPF, program.FD())
+	})
+	if ssoErr != nil {
+		return ssoErr
+	}
+	return err
+}
+
+// DetachSocketFilter detaches a SocketFilter BPF program from a socket
+// via SO_DETACH_BPF (the option value is ignored by the kernel).
+func DetachSocketFilter(conn syscall.Conn) error {
+	rawConn, err := conn.SyscallConn()
+	if err != nil {
+		return err
+	}
+	var ssoErr error
+	err = rawConn.Control(func(fd uintptr) {
+		ssoErr = syscall.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_DETACH_BPF, 0)
+	})
+	if ssoErr != nil {
+		return ssoErr
+	}
+	return err
+}
diff --git a/vendor/github.com/cilium/ebpf/link/syscalls.go b/vendor/github.com/cilium/ebpf/link/syscalls.go
new file mode 100644
index 0000000000000000000000000000000000000000..38f7ae9b78625a1f58e6c52f5ac931ebec990fa8
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/syscalls.go
@@ -0,0 +1,123 @@
+package link
+
+import (
+	"errors"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/asm"
+	"github.com/cilium/ebpf/internal"
+	"github.com/cilium/ebpf/internal/sys"
+	"github.com/cilium/ebpf/internal/unix"
+)
+
+// Type is the kind of link.
+type Type = sys.LinkType
+
+// Valid link types.
+//
+// These alias the kernel's enum bpf_link_type values as exposed by the
+// internal sys package.
+const (
+	UnspecifiedType   = sys.BPF_LINK_TYPE_UNSPEC
+	RawTracepointType = sys.BPF_LINK_TYPE_RAW_TRACEPOINT
+	TracingType       = sys.BPF_LINK_TYPE_TRACING
+	CgroupType        = sys.BPF_LINK_TYPE_CGROUP
+	IterType          = sys.BPF_LINK_TYPE_ITER
+	NetNsType         = sys.BPF_LINK_TYPE_NETNS
+	XDPType           = sys.BPF_LINK_TYPE_XDP
+	PerfEventType     = sys.BPF_LINK_TYPE_PERF_EVENT
+	KprobeMultiType   = sys.BPF_LINK_TYPE_KPROBE_MULTI
+)
+
+// haveProgAttach probes for BPF_PROG_ATTACH (4.10) by loading a CGroupSKB
+// program; no actual attach is attempted.
+var haveProgAttach = internal.NewFeatureTest("BPF_PROG_ATTACH", "4.10", func() error {
+	prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
+		Type:    ebpf.CGroupSKB,
+		License: "MIT",
+		Instructions: asm.Instructions{
+			asm.Mov.Imm(asm.R0, 0),
+			asm.Return(),
+		},
+	})
+	if err != nil {
+		return internal.ErrNotSupported
+	}
+
+	// BPF_PROG_ATTACH was introduced at the same time as CGgroupSKB,
+	// so being able to load the program is enough to infer that we
+	// have the syscall.
+	prog.Close()
+	return nil
+})
+
+// haveProgAttachReplace probes for atomic program replacement via
+// BPF_F_REPLACE (5.5). It deliberately passes an invalid target fd:
+// EINVAL means the flag is unknown, EBADF means the flag was accepted
+// and only the fd was rejected.
+var haveProgAttachReplace = internal.NewFeatureTest("BPF_PROG_ATTACH atomic replacement", "5.5", func() error {
+	if err := haveProgAttach(); err != nil {
+		return err
+	}
+
+	prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
+		Type:       ebpf.CGroupSKB,
+		AttachType: ebpf.AttachCGroupInetIngress,
+		License:    "MIT",
+		Instructions: asm.Instructions{
+			asm.Mov.Imm(asm.R0, 0),
+			asm.Return(),
+		},
+	})
+	if err != nil {
+		return internal.ErrNotSupported
+	}
+	defer prog.Close()
+
+	// We know that we have BPF_PROG_ATTACH since we can load CGroupSKB programs.
+	// If passing BPF_F_REPLACE gives us EINVAL we know that the feature isn't
+	// present.
+	attr := sys.ProgAttachAttr{
+		// We rely on this being checked after attachFlags.
+		TargetFd:    ^uint32(0),
+		AttachBpfFd: uint32(prog.FD()),
+		AttachType:  uint32(ebpf.AttachCGroupInetIngress),
+		AttachFlags: uint32(flagReplace),
+	}
+
+	err = sys.ProgAttach(&attr)
+	if errors.Is(err, unix.EINVAL) {
+		return internal.ErrNotSupported
+	}
+	if errors.Is(err, unix.EBADF) {
+		return nil
+	}
+	return err
+})
+
+// haveBPFLink probes for BPF_LINK_CREATE (5.7) using an invalid fd:
+// EINVAL means the command is unknown, EBADF means it was accepted.
+var haveBPFLink = internal.NewFeatureTest("bpf_link", "5.7", func() error {
+	attr := sys.LinkCreateAttr{
+		// This is a hopefully invalid file descriptor, which triggers EBADF.
+		TargetFd:   ^uint32(0),
+		ProgFd:     ^uint32(0),
+		AttachType: sys.AttachType(ebpf.AttachCGroupInetIngress),
+	}
+	_, err := sys.LinkCreate(&attr)
+	if errors.Is(err, unix.EINVAL) {
+		return internal.ErrNotSupported
+	}
+	if errors.Is(err, unix.EBADF) {
+		return nil
+	}
+	return err
+})
+
+// haveProgQuery probes for BPF_PROG_QUERY (4.15) with the same
+// invalid-fd technique as haveBPFLink.
+var haveProgQuery = internal.NewFeatureTest("BPF_PROG_QUERY", "4.15", func() error {
+	attr := sys.ProgQueryAttr{
+		// We rely on this being checked during the syscall.
+		// With an otherwise correct payload we expect EBADF here
+		// as an indication that the feature is present.
+		TargetFd:   ^uint32(0),
+		AttachType: sys.AttachType(ebpf.AttachCGroupInetIngress),
+	}
+
+	err := sys.ProgQuery(&attr)
+	if errors.Is(err, unix.EINVAL) {
+		return internal.ErrNotSupported
+	}
+	if errors.Is(err, unix.EBADF) {
+		return nil
+	}
+	return err
+})
diff --git a/vendor/github.com/cilium/ebpf/link/tracepoint.go b/vendor/github.com/cilium/ebpf/link/tracepoint.go
new file mode 100644
index 0000000000000000000000000000000000000000..a59ef9d1c5271a9a1c0675db61d486b3b9d335ad
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/tracepoint.go
@@ -0,0 +1,77 @@
+package link
+
+import (
+	"fmt"
+
+	"github.com/cilium/ebpf"
+)
+
+// TracepointOptions defines additional parameters that will be used
+// when loading Tracepoints.
+type TracepointOptions struct {
+	// Arbitrary value that can be fetched from an eBPF program
+	// via `bpf_get_attach_cookie()`.
+	//
+	// Needs kernel 5.15+.
+	Cookie uint64
+}
+
+// Tracepoint attaches the given eBPF program to the tracepoint with the given
+// group and name. See /sys/kernel/debug/tracing/events to find available
+// tracepoints. The top-level directory is the group, the event's subdirectory
+// is the name. Example:
+//
+//	tp, err := Tracepoint("syscalls", "sys_enter_fork", prog, nil)
+//
+// Losing the reference to the resulting Link (tp) will close the Tracepoint
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// Note that attaching eBPF programs to syscalls (sys_enter_*/sys_exit_*) is
+// only possible as of kernel 4.14 (commit cf5f5ce).
+func Tracepoint(group, name string, prog *ebpf.Program, opts *TracepointOptions) (Link, error) {
+	// Validate inputs before touching tracefs or the perf subsystem.
+	if group == "" || name == "" {
+		return nil, fmt.Errorf("group and name cannot be empty: %w", errInvalidInput)
+	}
+	if prog == nil {
+		return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
+	}
+	if !isValidTraceID(group) || !isValidTraceID(name) {
+		return nil, fmt.Errorf("group and name '%s/%s' must be alphanumeric or underscore: %w", group, name, errInvalidInput)
+	}
+	if prog.Type() != ebpf.TracePoint {
+		return nil, fmt.Errorf("eBPF program type %s is not a Tracepoint: %w", prog.Type(), errInvalidInput)
+	}
+
+	// Resolve the tracefs event ID, then open a system-wide perf event for it.
+	tid, err := getTraceEventID(group, name)
+	if err != nil {
+		return nil, err
+	}
+
+	fd, err := openTracepointPerfEvent(tid, perfAllThreads)
+	if err != nil {
+		return nil, err
+	}
+
+	var cookie uint64
+	if opts != nil {
+		cookie = opts.Cookie
+	}
+
+	pe := &perfEvent{
+		typ:       tracepointEvent,
+		group:     group,
+		name:      name,
+		tracefsID: tid,
+		cookie:    cookie,
+		fd:        fd,
+	}
+
+	// On attach failure, close the perf event fd to avoid a leak.
+	lnk, err := attachPerfEvent(pe, prog)
+	if err != nil {
+		pe.Close()
+		return nil, err
+	}
+
+	return lnk, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/tracing.go b/vendor/github.com/cilium/ebpf/link/tracing.go
new file mode 100644
index 0000000000000000000000000000000000000000..e26cc91498ba0e232d893146323dbd0408958135
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/tracing.go
@@ -0,0 +1,150 @@
+package link
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/btf"
+	"github.com/cilium/ebpf/internal/sys"
+)
+
+// tracing is the Link implementation for fentry/fexit/fmod_ret, LSM and
+// freplace attachments; it inherits everything except Update from RawLink.
+type tracing struct {
+	RawLink
+}
+
+// Update is not supported for tracing links.
+func (f *tracing) Update(new *ebpf.Program) error {
+	return fmt.Errorf("tracing update: %w", ErrNotSupported)
+}
+
+// AttachFreplace attaches the given eBPF program to the function it replaces.
+//
+// The program and name can either be provided at link time, or can be provided
+// at program load time. If they were provided at load time, they should be nil
+// and empty respectively here, as they will be ignored by the kernel.
+// Examples:
+//
+//	AttachFreplace(dispatcher, "function", replacement)
+//	AttachFreplace(nil, "", replacement)
+func AttachFreplace(targetProg *ebpf.Program, name string, prog *ebpf.Program) (Link, error) {
+	// name and targetProg must be provided together or not at all.
+	if (name == "") != (targetProg == nil) {
+		return nil, fmt.Errorf("must provide both or neither of name and targetProg: %w", errInvalidInput)
+	}
+	if prog == nil {
+		return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
+	}
+	if prog.Type() != ebpf.Extension {
+		return nil, fmt.Errorf("eBPF program type %s is not an Extension: %w", prog.Type(), errInvalidInput)
+	}
+
+	var (
+		target int
+		typeID btf.TypeID
+	)
+	if targetProg != nil {
+		// Resolve the target function's BTF type ID so the kernel knows
+		// which function within targetProg is being replaced.
+		btfHandle, err := targetProg.Handle()
+		if err != nil {
+			return nil, err
+		}
+		defer btfHandle.Close()
+
+		spec, err := btfHandle.Spec()
+		if err != nil {
+			return nil, err
+		}
+
+		var function *btf.Func
+		if err := spec.TypeByName(name, &function); err != nil {
+			return nil, err
+		}
+
+		target = targetProg.FD()
+		typeID, err = spec.TypeID(function)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	link, err := AttachRawLink(RawLinkOptions{
+		Target:  target,
+		Program: prog,
+		Attach:  ebpf.AttachNone,
+		BTF:     typeID,
+	})
+	if errors.Is(err, sys.ENOTSUPP) {
+		// This may be returned by bpf_tracing_prog_attach via bpf_arch_text_poke.
+		return nil, fmt.Errorf("create raw tracepoint: %w", ErrNotSupported)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return &tracing{*link}, nil
+}
+
+type TracingOptions struct {
+	// Program must be of type Tracing with attach type
+	// AttachTraceFEntry/AttachTraceFExit/AttachModifyReturn or
+	// AttachTraceRawTp.
+	Program *ebpf.Program
+}
+
+type LSMOptions struct {
+	// Program must be of type LSM with attach type
+	// AttachLSMMac.
+	Program *ebpf.Program
+}
+
+// attachBTFID links all BPF program types (Tracing/LSM) that they attach to a btf_id.
+func attachBTFID(program *ebpf.Program) (Link, error) {
+	if program.FD() < 0 {
+		return nil, fmt.Errorf("invalid program %w", sys.ErrClosedFd)
+	}
+
+	fd, err := sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{
+		ProgFd: uint32(program.FD()),
+	})
+	if errors.Is(err, sys.ENOTSUPP) {
+		// This may be returned by bpf_tracing_prog_attach via bpf_arch_text_poke.
+		return nil, fmt.Errorf("create raw tracepoint: %w", ErrNotSupported)
+	}
+	if err != nil {
+		return nil, fmt.Errorf("create raw tracepoint: %w", err)
+	}
+
+	raw := RawLink{fd: fd}
+	info, err := raw.Info()
+	if err != nil {
+		raw.Close()
+		return nil, err
+	}
+
+	if info.Type == RawTracepointType {
+		// Sadness upon sadness: a Tracing program with AttachRawTp returns
+		// a raw_tracepoint link. Other types return a tracing link.
+		return &rawTracepoint{raw}, nil
+	}
+
+	return &tracing{RawLink: RawLink{fd: fd}}, nil
+}
+
+// AttachTracing links a tracing (fentry/fexit/fmod_ret) BPF program or
+// a BTF-powered raw tracepoint (tp_btf) BPF Program to a BPF hook defined
+// in kernel modules.
+func AttachTracing(opts TracingOptions) (Link, error) {
+	if t := opts.Program.Type(); t != ebpf.Tracing {
+		return nil, fmt.Errorf("invalid program type %s, expected Tracing", t)
+	}
+
+	return attachBTFID(opts.Program)
+}
+
+// AttachLSM links a Linux security module (LSM) BPF Program to a BPF
+// hook defined in kernel modules.
+func AttachLSM(opts LSMOptions) (Link, error) {
+	if t := opts.Program.Type(); t != ebpf.LSM {
+		return nil, fmt.Errorf("invalid program type %s, expected LSM", t)
+	}
+
+	return attachBTFID(opts.Program)
+}
diff --git a/vendor/github.com/cilium/ebpf/link/uprobe.go b/vendor/github.com/cilium/ebpf/link/uprobe.go
new file mode 100644
index 0000000000000000000000000000000000000000..aa1ad9bbef2f98aa46eb2d1a38d2426dafa3d903
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/uprobe.go
@@ -0,0 +1,359 @@
+package link
+
+import (
+	"debug/elf"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/internal"
+)
+
+var (
+	// Location of the tracefs uprobe_events file, used by the tracefs
+	// fallback when the uprobe PMU is unavailable.
+	uprobeEventsPath = filepath.Join(tracefsPath, "uprobe_events")
+
+	// Sysfs file whose presence indicates kernel support for ref_ctr_offset.
+	uprobeRefCtrOffsetPMUPath = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset"
+	// elixir.bootlin.com/linux/v5.15-rc7/source/kernel/events/core.c#L9799
+	uprobeRefCtrOffsetShift = 32
+	// haveRefCtrOffsetPMU reports whether the kernel (4.20+) supports
+	// ref_ctr_offset, detected by stat'ing the sysfs format file above.
+	haveRefCtrOffsetPMU     = internal.NewFeatureTest("RefCtrOffsetPMU", "4.20", func() error {
+		_, err := os.Stat(uprobeRefCtrOffsetPMUPath)
+		if err != nil {
+			return internal.ErrNotSupported
+		}
+		return nil
+	})
+
+	// ErrNoSymbol indicates that the given symbol was not found
+	// in the ELF symbols table.
+	ErrNoSymbol = errors.New("not found")
+)
+
+// Executable defines an executable program on the filesystem.
+type Executable struct {
+	// Path of the executable on the filesystem.
+	path string
+	// Parsed ELF and dynamic symbols' addresses, keyed by symbol name.
+	// Populated once by load(); values are file offsets (see load).
+	addresses map[string]uint64
+}
+
+// UprobeOptions defines additional parameters that will be used
+// when loading Uprobes.
+type UprobeOptions struct {
+	// Symbol address. Must be provided in case of external symbols (shared libs).
+	// If set, overrides the address eventually parsed from the executable.
+	Address uint64
+	// The offset relative to given symbol. Useful when tracing an arbitrary point
+	// inside the frame of given symbol.
+	//
+	// Note: this field changed from being an absolute offset to being relative
+	// to Address.
+	Offset uint64
+	// Only set the uprobe on the given process ID. Useful when tracing
+	// shared library calls or programs that have many running instances.
+	//
+	// The zero value attaches to all threads (perfAllThreads).
+	PID int
+	// Automatically manage SDT reference counts (semaphores).
+	//
+	// If this field is set, the Kernel will increment/decrement the
+	// semaphore located in the process memory at the provided address on
+	// probe attach/detach.
+	//
+	// Requires the uprobe PMU to support ref_ctr_offset (kernel 4.20+).
+	//
+	// See also:
+	// sourceware.org/systemtap/wiki/UserSpaceProbeImplementation (Semaphore Handling)
+	// github.com/torvalds/linux/commit/1cc33161a83d
+	// github.com/torvalds/linux/commit/a6ca88b241d5
+	RefCtrOffset uint64
+	// Arbitrary value that can be fetched from an eBPF program
+	// via `bpf_get_attach_cookie()`.
+	//
+	// Needs kernel 5.15+.
+	Cookie uint64
+}
+
+// OpenExecutable parses the ELF executable or shared object at the given
+// path and returns an Executable. For example:
+//
+//	OpenExecutable("/bin/bash")
+//
+// The returned value can then be used to open Uprobe(s).
+func OpenExecutable(path string) (*Executable, error) {
+	if path == "" {
+		return nil, fmt.Errorf("path cannot be empty")
+	}
+
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, fmt.Errorf("open file '%s': %w", path, err)
+	}
+	defer f.Close()
+
+	se, err := internal.NewSafeELFFile(f)
+	if err != nil {
+		return nil, fmt.Errorf("parse ELF file: %w", err)
+	}
+
+	if se.Type != elf.ET_EXEC && se.Type != elf.ET_DYN {
+		// ELF is not an executable or a shared object.
+		return nil, errors.New("the given file is not an executable or a shared object")
+	}
+
+	ex := Executable{
+		path:      path,
+		addresses: make(map[string]uint64),
+	}
+
+	// Eagerly resolve all function symbol addresses so later Uprobe calls
+	// don't need to re-read the ELF.
+	if err := ex.load(se); err != nil {
+		return nil, err
+	}
+
+	return &ex, nil
+}
+
+// load populates ex.addresses with the file offsets of all STT_FUNC symbols
+// found in the ELF's symbol and dynamic symbol tables.
+func (ex *Executable) load(f *internal.SafeELFFile) error {
+	syms, err := f.Symbols()
+	if err != nil && !errors.Is(err, elf.ErrNoSymbols) {
+		return err
+	}
+
+	dynsyms, err := f.DynamicSymbols()
+	if err != nil && !errors.Is(err, elf.ErrNoSymbols) {
+		return err
+	}
+
+	// Either table may legitimately be absent (elf.ErrNoSymbols is ignored
+	// above); merge whatever is available.
+	syms = append(syms, dynsyms...)
+
+	for _, s := range syms {
+		if elf.ST_TYPE(s.Info) != elf.STT_FUNC {
+			// Symbol not associated with a function or other executable code.
+			continue
+		}
+
+		address := s.Value
+
+		// Loop over ELF segments.
+		for _, prog := range f.Progs {
+			// Skip uninteresting segments.
+			if prog.Type != elf.PT_LOAD || (prog.Flags&elf.PF_X) == 0 {
+				continue
+			}
+
+			if prog.Vaddr <= s.Value && s.Value < (prog.Vaddr+prog.Memsz) {
+				// If the symbol value is contained in the segment, calculate
+				// the symbol offset.
+				//
+				// fn symbol offset = fn symbol VA - .text VA + .text offset
+				//
+				// stackoverflow.com/a/40249502
+				address = s.Value - prog.Vaddr + prog.Off
+				break
+			}
+		}
+
+		// NOTE(review): a name present in both tables resolves to the dynsym
+		// entry, since dynsyms are appended last and overwrite here.
+		ex.addresses[s.Name] = address
+	}
+
+	return nil
+}
+
+// address calculates the address of a symbol in the executable.
+//
+// opts must not be nil. If opts.Address is set it takes precedence over the
+// parsed symbol table; opts.Offset is added to the result in either case.
+func (ex *Executable) address(symbol string, opts *UprobeOptions) (uint64, error) {
+	if opts.Address > 0 {
+		return opts.Address + opts.Offset, nil
+	}
+
+	address, ok := ex.addresses[symbol]
+	if !ok {
+		return 0, fmt.Errorf("symbol %s: %w", symbol, ErrNoSymbol)
+	}
+
+	// Symbols with location 0 from section undef are shared library calls and
+	// are relocated before the binary is executed. Dynamic linking is not
+	// implemented by the library, so mark this as unsupported for now.
+	//
+	// Since only offset values are stored and not elf.Symbol, if the value is 0,
+	// assume it's an external symbol.
+	if address == 0 {
+		return 0, fmt.Errorf("cannot resolve %s library call '%s': %w "+
+			"(consider providing UprobeOptions.Address)", ex.path, symbol, ErrNotSupported)
+	}
+
+	return address + opts.Offset, nil
+}
+
+// Uprobe attaches the given eBPF program to a perf event that fires when the
+// given symbol starts executing in the given Executable.
+// For example, /bin/bash::main():
+//
+//	ex, _ = OpenExecutable("/bin/bash")
+//	ex.Uprobe("main", prog, nil)
+//
+// When using symbols which belongs to shared libraries,
+// an offset must be provided via options:
+//
+//	up, err := ex.Uprobe("main", prog, &UprobeOptions{Offset: 0x123})
+//
+// Note: Setting the Offset field in the options supersedes the symbol's offset.
+//
+// Losing the reference to the resulting Link (up) will close the Uprobe
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// Functions provided by shared libraries can currently not be traced and
+// will result in an ErrNotSupported.
+func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) {
+	u, err := ex.uprobe(symbol, prog, opts, false)
+	if err != nil {
+		return nil, err
+	}
+
+	lnk, err := attachPerfEvent(u, prog)
+	if err != nil {
+		// Close the perf event ourselves only on failure; on success the
+		// returned link owns it.
+		u.Close()
+		return nil, err
+	}
+
+	return lnk, nil
+}
+
+// Uretprobe attaches the given eBPF program to a perf event that fires right
+// before the given symbol exits. For example, /bin/bash::main():
+//
+//	ex, _ = OpenExecutable("/bin/bash")
+//	ex.Uretprobe("main", prog, nil)
+//
+// When using symbols which belongs to shared libraries,
+// an offset must be provided via options:
+//
+//	up, err := ex.Uretprobe("main", prog, &UprobeOptions{Offset: 0x123})
+//
+// Note: Setting the Offset field in the options supersedes the symbol's offset.
+//
+// Losing the reference to the resulting Link (up) will close the Uprobe
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// Functions provided by shared libraries can currently not be traced and
+// will result in an ErrNotSupported.
+func (ex *Executable) Uretprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) {
+	u, err := ex.uprobe(symbol, prog, opts, true)
+	if err != nil {
+		return nil, err
+	}
+
+	lnk, err := attachPerfEvent(u, prog)
+	if err != nil {
+		// Close the perf event ourselves only on failure; on success the
+		// returned link owns it.
+		u.Close()
+		return nil, err
+	}
+
+	return lnk, nil
+}
+
+// uprobe opens a perf event for the given binary/symbol and attaches prog to it.
+// If ret is true, create a uretprobe.
+//
+// It first tries the uprobe PMU, then falls back to creating a tracefs
+// trace event when the PMU reports ErrNotSupported.
+func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions, ret bool) (*perfEvent, error) {
+	if prog == nil {
+		return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
+	}
+	// u(ret)probes are implemented as Kprobe-type programs in the kernel ABI.
+	if prog.Type() != ebpf.Kprobe {
+		return nil, fmt.Errorf("eBPF program type %s is not Kprobe: %w", prog.Type(), errInvalidInput)
+	}
+	if opts == nil {
+		opts = &UprobeOptions{}
+	}
+
+	offset, err := ex.address(symbol, opts)
+	if err != nil {
+		return nil, err
+	}
+
+	// A zero PID means "attach to all threads".
+	pid := opts.PID
+	if pid == 0 {
+		pid = perfAllThreads
+	}
+
+	if opts.RefCtrOffset != 0 {
+		if err := haveRefCtrOffsetPMU(); err != nil {
+			return nil, fmt.Errorf("uprobe ref_ctr_offset: %w", err)
+		}
+	}
+
+	args := probeArgs{
+		symbol:       symbol,
+		path:         ex.path,
+		offset:       offset,
+		pid:          pid,
+		refCtrOffset: opts.RefCtrOffset,
+		ret:          ret,
+		cookie:       opts.Cookie,
+	}
+
+	// Use uprobe PMU if the kernel has it available.
+	tp, err := pmuUprobe(args)
+	if err == nil {
+		return tp, nil
+	}
+	// Any error other than ErrNotSupported is fatal; only a missing PMU
+	// triggers the tracefs fallback below.
+	if err != nil && !errors.Is(err, ErrNotSupported) {
+		return nil, fmt.Errorf("creating perf_uprobe PMU: %w", err)
+	}
+
+	// Use tracefs if uprobe PMU is missing.
+	// The tracefs api only accepts a restricted character set, so the
+	// symbol is sanitized for the event name.
+	args.symbol = sanitizeSymbol(symbol)
+	tp, err = tracefsUprobe(args)
+	if err != nil {
+		return nil, fmt.Errorf("creating trace event '%s:%s' in tracefs: %w", ex.path, symbol, err)
+	}
+
+	return tp, nil
+}
+
+// pmuUprobe opens a perf event based on the uprobe PMU.
+// Thin wrapper around the generic pmuProbe with the uprobe probe type.
+func pmuUprobe(args probeArgs) (*perfEvent, error) {
+	return pmuProbe(uprobeType, args)
+}
+
+// tracefsUprobe creates a Uprobe tracefs entry.
+// Thin wrapper around the generic tracefsProbe with the uprobe probe type.
+func tracefsUprobe(args probeArgs) (*perfEvent, error) {
+	return tracefsProbe(uprobeType, args)
+}
+
+// sanitizeSymbol replaces every invalid character for the tracefs api with an underscore.
+// It is equivalent to calling regexp.MustCompile("[^a-zA-Z0-9]+").ReplaceAllString("_").
+//
+// Note that a run of consecutive invalid characters collapses into a single
+// underscore (tracked via `skip`), matching the regexp's `+` quantifier.
+func sanitizeSymbol(s string) string {
+	var b strings.Builder
+	b.Grow(len(s))
+	var skip bool
+	for _, c := range []byte(s) {
+		switch {
+		case c >= 'a' && c <= 'z',
+			c >= 'A' && c <= 'Z',
+			c >= '0' && c <= '9':
+			skip = false
+			b.WriteByte(c)
+
+		default:
+			// Emit at most one '_' per run of invalid characters.
+			if !skip {
+				b.WriteByte('_')
+				skip = true
+			}
+		}
+	}
+
+	return b.String()
+}
+
+// uprobeToken creates the PATH:OFFSET(REF_CTR_OFFSET) token for the tracefs api.
+// The REF_CTR_OFFSET suffix is only appended when args.refCtrOffset is set.
+func uprobeToken(args probeArgs) string {
+	po := fmt.Sprintf("%s:%#x", args.path, args.offset)
+
+	if args.refCtrOffset != 0 {
+		// This is not documented in Documentation/trace/uprobetracer.txt.
+		// elixir.bootlin.com/linux/v5.15-rc7/source/kernel/trace/trace.c#L5564
+		po += fmt.Sprintf("(%#x)", args.refCtrOffset)
+	}
+
+	return po
+}
diff --git a/vendor/github.com/cilium/ebpf/link/xdp.go b/vendor/github.com/cilium/ebpf/link/xdp.go
new file mode 100644
index 0000000000000000000000000000000000000000..aa8dd3a4cb39160aa7d9ebbb69ff0c78cc74539e
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/xdp.go
@@ -0,0 +1,54 @@
+package link
+
+import (
+	"fmt"
+
+	"github.com/cilium/ebpf"
+)
+
+// XDPAttachFlags represents how XDP program will be attached to interface.
+type XDPAttachFlags uint32
+
+const (
+	// XDPGenericMode (SKB) links XDP BPF program for drivers which do
+	// not yet support native XDP.
+	//
+	// Flags start at bit 1 (iota + 1), yielding values 2, 4, 8 —
+	// presumably mirroring the kernel's XDP_FLAGS_{SKB,DRV,HW}_MODE;
+	// confirm against include/uapi/linux/if_link.h.
+	XDPGenericMode XDPAttachFlags = 1 << (iota + 1)
+	// XDPDriverMode links XDP BPF program into the driver’s receive path.
+	XDPDriverMode
+	// XDPOffloadMode offloads the entire XDP BPF program into hardware.
+	XDPOffloadMode
+)
+
+// XDPOptions holds the parameters for AttachXDP.
+type XDPOptions struct {
+	// Program must be an XDP BPF program.
+	Program *ebpf.Program
+
+	// Interface is the interface index to attach program to.
+	// Must be >= 1 (interface indices are 1-based).
+	Interface int
+
+	// Flags is one of XDPAttachFlags (optional).
+	//
+	// Only one XDP mode should be set, without flag defaults
+	// to driver/generic mode (best effort).
+	Flags XDPAttachFlags
+}
+
+// AttachXDP links an XDP BPF program to an XDP hook on the interface
+// identified by opts.Interface.
+//
+// Returns an error if the program is not of type XDP or the interface
+// index is invalid (< 1).
+func AttachXDP(opts XDPOptions) (Link, error) {
+	if t := opts.Program.Type(); t != ebpf.XDP {
+		return nil, fmt.Errorf("invalid program type %s, expected XDP", t)
+	}
+
+	if opts.Interface < 1 {
+		return nil, fmt.Errorf("invalid interface index: %d", opts.Interface)
+	}
+
+	rawLink, err := AttachRawLink(RawLinkOptions{
+		Program: opts.Program,
+		Attach:  ebpf.AttachXDP,
+		Target:  opts.Interface,
+		Flags:   uint32(opts.Flags),
+	})
+	if err != nil {
+		// Return an untyped nil: returning the (nil) *RawLink directly would
+		// yield a non-nil Link interface wrapping a nil pointer, which breaks
+		// callers' `link != nil` checks.
+		return nil, err
+	}
+
+	return rawLink, nil
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 2d580fafb7f278518ee99741f94220d3ba709a44..f7eac71192da08a480a780846e114a71ec8f08c1 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -16,6 +16,7 @@ github.com/cilium/ebpf/internal
 github.com/cilium/ebpf/internal/epoll
 github.com/cilium/ebpf/internal/sys
 github.com/cilium/ebpf/internal/unix
+github.com/cilium/ebpf/link
 github.com/cilium/ebpf/ringbuf
 github.com/cilium/ebpf/rlimit
 # github.com/davecgh/go-spew v1.1.1