diff --git a/bpf/flows.c b/bpf/flows.c index a7c233f733ce15fb84aa82cd479c31a02fcd8fd9..ba934557f19cddfcdb57bee22168f69dec983007 100644 --- a/bpf/flows.c +++ b/bpf/flows.c @@ -13,7 +13,6 @@ until an entry is available. 4) When hash collision is detected, we send the new entry to userpace via ringbuffer. */ -#define BPF_NO_PRESERVE_ACCESS_INDEX #include <vmlinux.h> #include <bpf_helpers.h> diff --git a/bpf/headers/bpf_core_read.h b/bpf/headers/bpf_core_read.h new file mode 100644 index 0000000000000000000000000000000000000000..496e6a8ee0dc923f85afb96186a002d2b2080757 --- /dev/null +++ b/bpf/headers/bpf_core_read.h @@ -0,0 +1,484 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +#ifndef __BPF_CORE_READ_H__ +#define __BPF_CORE_READ_H__ + +/* + * enum bpf_field_info_kind is passed as a second argument into + * __builtin_preserve_field_info() built-in to get a specific aspect of + * a field, captured as a first argument. __builtin_preserve_field_info(field, + * info_kind) returns __u32 integer and produces BTF field relocation, which + * is understood and processed by libbpf during BPF object loading. See + * selftests/bpf for examples. + */ +enum bpf_field_info_kind { + BPF_FIELD_BYTE_OFFSET = 0, /* field byte offset */ + BPF_FIELD_BYTE_SIZE = 1, + BPF_FIELD_EXISTS = 2, /* field existence in target kernel */ + BPF_FIELD_SIGNED = 3, + BPF_FIELD_LSHIFT_U64 = 4, + BPF_FIELD_RSHIFT_U64 = 5, +}; + +/* second argument to __builtin_btf_type_id() built-in */ +enum bpf_type_id_kind { + BPF_TYPE_ID_LOCAL = 0, /* BTF type ID in local program */ + BPF_TYPE_ID_TARGET = 1, /* BTF type ID in target kernel */ +}; + +/* second argument to __builtin_preserve_type_info() built-in */ +enum bpf_type_info_kind { + BPF_TYPE_EXISTS = 0, /* type existence in target kernel */ + BPF_TYPE_SIZE = 1, /* type size in target kernel */ + BPF_TYPE_MATCHES = 2, /* type match in target kernel */ +}; + +/* second argument to __builtin_preserve_enum_value() built-in */ +enum bpf_enum_value_kind { + BPF_ENUMVAL_EXISTS = 0, /* enum value existence in kernel */ + BPF_ENUMVAL_VALUE = 1, /* enum value value relocation */ +}; + +#define __CORE_RELO(src, field, info) \ + __builtin_preserve_field_info((src)->field, BPF_FIELD_##info) + +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \ + bpf_probe_read_kernel( \ + (void *)dst, \ + __CORE_RELO(src, fld, BYTE_SIZE), \ + (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET)) +#else +/* semantics of LSHIFT_64 assumes loading values into low-ordered bytes, so + * for big-endian we need to adjust destination pointer accordingly, based on + * field byte size + */ +#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \ + bpf_probe_read_kernel( \ + (void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)), \ + __CORE_RELO(src, fld, BYTE_SIZE), \ + (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET)) +#endif + +/* + * Extract bitfield, identified by s->field, and return its value as u64. + * All this is done in relocatable manner, so bitfield changes such as + * signedness, bit size, offset changes, this will be handled automatically. + * This version of macro is using bpf_probe_read_kernel() to read underlying + * integer storage. Macro functions as an expression and its return type is + * bpf_probe_read_kernel()'s return value: 0, on success, <0 on error. 
+ */ +#define BPF_CORE_READ_BITFIELD_PROBED(s, field) ({ \ + unsigned long long val = 0; \ + \ + __CORE_BITFIELD_PROBE_READ(&val, s, field); \ + val <<= __CORE_RELO(s, field, LSHIFT_U64); \ + if (__CORE_RELO(s, field, SIGNED)) \ + val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64); \ + else \ + val = val >> __CORE_RELO(s, field, RSHIFT_U64); \ + val; \ +}) + +/* + * Extract bitfield, identified by s->field, and return its value as u64. + * This version of macro is using direct memory reads and should be used from + * BPF program types that support such functionality (e.g., typed raw + * tracepoints). + */ +#define BPF_CORE_READ_BITFIELD(s, field) ({ \ + const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \ + unsigned long long val; \ + \ + /* This is a so-called barrier_var() operation that makes specified \ + * variable "a black box" for optimizing compiler. \ + * It forces compiler to perform BYTE_OFFSET relocation on p and use \ + * its calculated value in the switch below, instead of applying \ + * the same relocation 4 times for each individual memory load. \ + */ \ + asm volatile("" : "=r"(p) : "0"(p)); \ + \ + switch (__CORE_RELO(s, field, BYTE_SIZE)) { \ + case 1: val = *(const unsigned char *)p; break; \ + case 2: val = *(const unsigned short *)p; break; \ + case 4: val = *(const unsigned int *)p; break; \ + case 8: val = *(const unsigned long long *)p; break; \ + } \ + val <<= __CORE_RELO(s, field, LSHIFT_U64); \ + if (__CORE_RELO(s, field, SIGNED)) \ + val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64); \ + else \ + val = val >> __CORE_RELO(s, field, RSHIFT_U64); \ + val; \ +}) + +#define ___bpf_field_ref1(field) (field) +#define ___bpf_field_ref2(type, field) (((typeof(type) *)0)->field) +#define ___bpf_field_ref(args...) \ + ___bpf_apply(___bpf_field_ref, ___bpf_narg(args))(args) + +/* + * Convenience macro to check that field actually exists in target kernel's. + * Returns: + * 1, if matching field is present in target kernel; + * 0, if no matching field found. + * + * Supports two forms: + * - field reference through variable access: + * bpf_core_field_exists(p->my_field); + * - field reference through type and field names: + * bpf_core_field_exists(struct my_type, my_field). + */ +#define bpf_core_field_exists(field...) \ + __builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_EXISTS) + +/* + * Convenience macro to get the byte size of a field. Works for integers, + * struct/unions, pointers, arrays, and enums. + * + * Supports two forms: + * - field reference through variable access: + * bpf_core_field_size(p->my_field); + * - field reference through type and field names: + * bpf_core_field_size(struct my_type, my_field). + */ +#define bpf_core_field_size(field...) \ + __builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_BYTE_SIZE) + +/* + * Convenience macro to get field's byte offset. + * + * Supports two forms: + * - field reference through variable access: + * bpf_core_field_offset(p->my_field); + * - field reference through type and field names: + * bpf_core_field_offset(struct my_type, my_field). + */ +#define bpf_core_field_offset(field...) \ + __builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_BYTE_OFFSET) + +/* + * Convenience macro to get BTF type ID of a specified type, using a local BTF + * information. Return 32-bit unsigned integer with type ID from program's own + * BTF. Always succeeds. 
+ */ +#define bpf_core_type_id_local(type) \ + __builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_LOCAL) + +/* + * Convenience macro to get BTF type ID of a target kernel's type that matches + * specified local type. + * Returns: + * - valid 32-bit unsigned type ID in kernel BTF; + * - 0, if no matching type was found in a target kernel BTF. + */ +#define bpf_core_type_id_kernel(type) \ + __builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_TARGET) + +/* + * Convenience macro to check that provided named type + * (struct/union/enum/typedef) exists in a target kernel. + * Returns: + * 1, if such type is present in target kernel's BTF; + * 0, if no matching type is found. + */ +#define bpf_core_type_exists(type) \ + __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_EXISTS) + +/* + * Convenience macro to check that provided named type + * (struct/union/enum/typedef) "matches" that in a target kernel. + * Returns: + * 1, if the type matches in the target kernel's BTF; + * 0, if the type does not match any in the target kernel + */ +#define bpf_core_type_matches(type) \ + __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_MATCHES) + +/* + * Convenience macro to get the byte size of a provided named type + * (struct/union/enum/typedef) in a target kernel. + * Returns: + * >= 0 size (in bytes), if type is present in target kernel's BTF; + * 0, if no matching type is found. + */ +#define bpf_core_type_size(type) \ + __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_SIZE) + +/* + * Convenience macro to check that provided enumerator value is defined in + * a target kernel. + * Returns: + * 1, if specified enum type and its enumerator value are present in target + * kernel's BTF; + * 0, if no matching enum and/or enum value within that enum is found. + */ +#define bpf_core_enum_value_exists(enum_type, enum_value) \ + __builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_EXISTS) + +/* + * Convenience macro to get the integer value of an enumerator value in + * a target kernel. + * Returns: + * 64-bit value, if specified enum type and its enumerator value are + * present in target kernel's BTF; + * 0, if no matching enum and/or enum value within that enum is found. + */ +#define bpf_core_enum_value(enum_type, enum_value) \ + __builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_VALUE) + +/* + * bpf_core_read() abstracts away bpf_probe_read_kernel() call and captures + * offset relocation for source address using __builtin_preserve_access_index() + * built-in, provided by Clang. + * + * __builtin_preserve_access_index() takes as an argument an expression of + * taking an address of a field within struct/union. It makes compiler emit + * a relocation, which records BTF type ID describing root struct/union and an + * accessor string which describes exact embedded field that was used to take + * an address. See detailed description of this relocation format and + * semantics in comments to struct bpf_field_reloc in libbpf_internal.h. + * + * This relocation allows libbpf to adjust BPF instruction to use correct + * actual field offset, based on target kernel BTF type that matches original + * (local) BTF, used to record relocation. + */ +#define bpf_core_read(dst, sz, src) \ + bpf_probe_read_kernel(dst, sz, (const void *)__builtin_preserve_access_index(src)) + +/* NOTE: see comments for BPF_CORE_READ_USER() about the proper types use. 
*/ +#define bpf_core_read_user(dst, sz, src) \ + bpf_probe_read_user(dst, sz, (const void *)__builtin_preserve_access_index(src)) +/* + * bpf_core_read_str() is a thin wrapper around bpf_probe_read_str() + * additionally emitting BPF CO-RE field relocation for specified source + * argument. + */ +#define bpf_core_read_str(dst, sz, src) \ + bpf_probe_read_kernel_str(dst, sz, (const void *)__builtin_preserve_access_index(src)) + +/* NOTE: see comments for BPF_CORE_READ_USER() about the proper types use. */ +#define bpf_core_read_user_str(dst, sz, src) \ + bpf_probe_read_user_str(dst, sz, (const void *)__builtin_preserve_access_index(src)) + +#define ___concat(a, b) a ## b +#define ___apply(fn, n) ___concat(fn, n) +#define ___nth(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, __11, N, ...) N + +/* + * return number of provided arguments; used for switch-based variadic macro + * definitions (see ___last, ___arrow, etc below) + */ +#define ___narg(...) ___nth(_, ##__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +/* + * return 0 if no arguments are passed, N - otherwise; used for + * recursively-defined macros to specify termination (0) case, and generic + * (N) case (e.g., ___read_ptrs, ___core_read) + */ +#define ___empty(...) ___nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0) + +#define ___last1(x) x +#define ___last2(a, x) x +#define ___last3(a, b, x) x +#define ___last4(a, b, c, x) x +#define ___last5(a, b, c, d, x) x +#define ___last6(a, b, c, d, e, x) x +#define ___last7(a, b, c, d, e, f, x) x +#define ___last8(a, b, c, d, e, f, g, x) x +#define ___last9(a, b, c, d, e, f, g, h, x) x +#define ___last10(a, b, c, d, e, f, g, h, i, x) x +#define ___last(...) ___apply(___last, ___narg(__VA_ARGS__))(__VA_ARGS__) + +#define ___nolast2(a, _) a +#define ___nolast3(a, b, _) a, b +#define ___nolast4(a, b, c, _) a, b, c +#define ___nolast5(a, b, c, d, _) a, b, c, d +#define ___nolast6(a, b, c, d, e, _) a, b, c, d, e +#define ___nolast7(a, b, c, d, e, f, _) a, b, c, d, e, f +#define ___nolast8(a, b, c, d, e, f, g, _) a, b, c, d, e, f, g +#define ___nolast9(a, b, c, d, e, f, g, h, _) a, b, c, d, e, f, g, h +#define ___nolast10(a, b, c, d, e, f, g, h, i, _) a, b, c, d, e, f, g, h, i +#define ___nolast(...) ___apply(___nolast, ___narg(__VA_ARGS__))(__VA_ARGS__) + +#define ___arrow1(a) a +#define ___arrow2(a, b) a->b +#define ___arrow3(a, b, c) a->b->c +#define ___arrow4(a, b, c, d) a->b->c->d +#define ___arrow5(a, b, c, d, e) a->b->c->d->e +#define ___arrow6(a, b, c, d, e, f) a->b->c->d->e->f +#define ___arrow7(a, b, c, d, e, f, g) a->b->c->d->e->f->g +#define ___arrow8(a, b, c, d, e, f, g, h) a->b->c->d->e->f->g->h +#define ___arrow9(a, b, c, d, e, f, g, h, i) a->b->c->d->e->f->g->h->i +#define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j +#define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__) + +#define ___type(...) typeof(___arrow(__VA_ARGS__)) + +#define ___read(read_fn, dst, src_type, src, accessor) \ + read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor) + +/* "recursively" read a sequence of inner pointers using local __t var */ +#define ___rd_first(fn, src, a) ___read(fn, &__t, ___type(src), src, a); +#define ___rd_last(fn, ...) \ + ___read(fn, &__t, ___type(___nolast(__VA_ARGS__)), __t, ___last(__VA_ARGS__)); +#define ___rd_p1(fn, ...) const void *__t; ___rd_first(fn, __VA_ARGS__) +#define ___rd_p2(fn, ...) ___rd_p1(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) +#define ___rd_p3(fn, ...) 
___rd_p2(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) +#define ___rd_p4(fn, ...) ___rd_p3(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) +#define ___rd_p5(fn, ...) ___rd_p4(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) +#define ___rd_p6(fn, ...) ___rd_p5(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) +#define ___rd_p7(fn, ...) ___rd_p6(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) +#define ___rd_p8(fn, ...) ___rd_p7(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) +#define ___rd_p9(fn, ...) ___rd_p8(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) +#define ___read_ptrs(fn, src, ...) \ + ___apply(___rd_p, ___narg(__VA_ARGS__))(fn, src, __VA_ARGS__) + +#define ___core_read0(fn, fn_ptr, dst, src, a) \ + ___read(fn, dst, ___type(src), src, a); +#define ___core_readN(fn, fn_ptr, dst, src, ...) \ + ___read_ptrs(fn_ptr, src, ___nolast(__VA_ARGS__)) \ + ___read(fn, dst, ___type(src, ___nolast(__VA_ARGS__)), __t, \ + ___last(__VA_ARGS__)); +#define ___core_read(fn, fn_ptr, dst, src, a, ...) \ + ___apply(___core_read, ___empty(__VA_ARGS__))(fn, fn_ptr, dst, \ + src, a, ##__VA_ARGS__) + +/* + * BPF_CORE_READ_INTO() is a more performance-conscious variant of + * BPF_CORE_READ(), in which final field is read into user-provided storage. + * See BPF_CORE_READ() below for more details on general usage. + */ +#define BPF_CORE_READ_INTO(dst, src, a, ...) ({ \ + ___core_read(bpf_core_read, bpf_core_read, \ + dst, (src), a, ##__VA_ARGS__) \ +}) + +/* + * Variant of BPF_CORE_READ_INTO() for reading from user-space memory. + * + * NOTE: see comments for BPF_CORE_READ_USER() about the proper types use. + */ +#define BPF_CORE_READ_USER_INTO(dst, src, a, ...) ({ \ + ___core_read(bpf_core_read_user, bpf_core_read_user, \ + dst, (src), a, ##__VA_ARGS__) \ +}) + +/* Non-CO-RE variant of BPF_CORE_READ_INTO() */ +#define BPF_PROBE_READ_INTO(dst, src, a, ...) ({ \ + ___core_read(bpf_probe_read, bpf_probe_read, \ + dst, (src), a, ##__VA_ARGS__) \ +}) + +/* Non-CO-RE variant of BPF_CORE_READ_USER_INTO(). + * + * As no CO-RE relocations are emitted, source types can be arbitrary and are + * not restricted to kernel types only. + */ +#define BPF_PROBE_READ_USER_INTO(dst, src, a, ...) ({ \ + ___core_read(bpf_probe_read_user, bpf_probe_read_user, \ + dst, (src), a, ##__VA_ARGS__) \ +}) + +/* + * BPF_CORE_READ_STR_INTO() does same "pointer chasing" as + * BPF_CORE_READ() for intermediate pointers, but then executes (and returns + * corresponding error code) bpf_core_read_str() for final string read. + */ +#define BPF_CORE_READ_STR_INTO(dst, src, a, ...) ({ \ + ___core_read(bpf_core_read_str, bpf_core_read, \ + dst, (src), a, ##__VA_ARGS__) \ +}) + +/* + * Variant of BPF_CORE_READ_STR_INTO() for reading from user-space memory. + * + * NOTE: see comments for BPF_CORE_READ_USER() about the proper types use. + */ +#define BPF_CORE_READ_USER_STR_INTO(dst, src, a, ...) ({ \ + ___core_read(bpf_core_read_user_str, bpf_core_read_user, \ + dst, (src), a, ##__VA_ARGS__) \ +}) + +/* Non-CO-RE variant of BPF_CORE_READ_STR_INTO() */ +#define BPF_PROBE_READ_STR_INTO(dst, src, a, ...) ({ \ + ___core_read(bpf_probe_read_str, bpf_probe_read, \ + dst, (src), a, ##__VA_ARGS__) \ +}) + +/* + * Non-CO-RE variant of BPF_CORE_READ_USER_STR_INTO(). + * + * As no CO-RE relocations are emitted, source types can be arbitrary and are + * not restricted to kernel types only. + */ +#define BPF_PROBE_READ_USER_STR_INTO(dst, src, a, ...) 
({ \ + ___core_read(bpf_probe_read_user_str, bpf_probe_read_user, \ + dst, (src), a, ##__VA_ARGS__) \ +}) + +/* + * BPF_CORE_READ() is used to simplify BPF CO-RE relocatable read, especially + * when there are few pointer chasing steps. + * E.g., what in non-BPF world (or in BPF w/ BCC) would be something like: + * int x = s->a.b.c->d.e->f->g; + * can be succinctly achieved using BPF_CORE_READ as: + * int x = BPF_CORE_READ(s, a.b.c, d.e, f, g); + * + * BPF_CORE_READ will decompose above statement into 4 bpf_core_read (BPF + * CO-RE relocatable bpf_probe_read_kernel() wrapper) calls, logically + * equivalent to: + * 1. const void *__t = s->a.b.c; + * 2. __t = __t->d.e; + * 3. __t = __t->f; + * 4. return __t->g; + * + * Equivalence is logical, because there is a heavy type casting/preservation + * involved, as well as all the reads are happening through + * bpf_probe_read_kernel() calls using __builtin_preserve_access_index() to + * emit CO-RE relocations. + * + * N.B. Only up to 9 "field accessors" are supported, which should be more + * than enough for any practical purpose. + */ +#define BPF_CORE_READ(src, a, ...) ({ \ + ___type((src), a, ##__VA_ARGS__) __r; \ + BPF_CORE_READ_INTO(&__r, (src), a, ##__VA_ARGS__); \ + __r; \ +}) + +/* + * Variant of BPF_CORE_READ() for reading from user-space memory. + * + * NOTE: all the source types involved are still *kernel types* and need to + * exist in kernel (or kernel module) BTF, otherwise CO-RE relocation will + * fail. Custom user types are not relocatable with CO-RE. + * The typical situation in which BPF_CORE_READ_USER() might be used is to + * read kernel UAPI types from the user-space memory passed in as a syscall + * input argument. + */ +#define BPF_CORE_READ_USER(src, a, ...) ({ \ + ___type((src), a, ##__VA_ARGS__) __r; \ + BPF_CORE_READ_USER_INTO(&__r, (src), a, ##__VA_ARGS__); \ + __r; \ +}) + +/* Non-CO-RE variant of BPF_CORE_READ() */ +#define BPF_PROBE_READ(src, a, ...) ({ \ + ___type((src), a, ##__VA_ARGS__) __r; \ + BPF_PROBE_READ_INTO(&__r, (src), a, ##__VA_ARGS__); \ + __r; \ +}) + +/* + * Non-CO-RE variant of BPF_CORE_READ_USER(). + * + * As no CO-RE relocations are emitted, source types can be arbitrary and are + * not restricted to kernel types only. + */ +#define BPF_PROBE_READ_USER(src, a, ...) 
({ \
+	___type((src), a, ##__VA_ARGS__) __r; \
+	BPF_PROBE_READ_USER_INTO(&__r, (src), a, ##__VA_ARGS__); \
+	__r; \
+})
+
+#endif
+
diff --git a/pkg/agent/agent.go b/pkg/agent/agent.go
index e2b771559dc41394411ff1ef1fdf35c0c566ded8..efd44c82115d42b4614cc0c108ccd25c2f5c8240 100644
--- a/pkg/agent/agent.go
+++ b/pkg/agent/agent.go
@@ -78,7 +78,7 @@ type ebpfFlowFetcher interface {
 	io.Closer
 	Register(iface ifaces.Interface) error
 
-	LookupAndDeleteMap() map[ebpf.BpfFlowId]ebpf.BpfFlowMetrics
+	LookupAndDeleteMap() map[ebpf.BpfFlowId]*ebpf.BpfFlowMetrics
 	ReadRingBuf() (ringbuf.Record, error)
 }
 
@@ -352,8 +352,8 @@ func (f *Flows) buildAndStartPipeline(ctx context.Context) (*node.Terminal[[]*fl
 	}
 
 	alog.Debug("connecting flows' processing graph")
-	mapTracer := node.AsStart(f.mapTracer.TraceLoop(ctx))
-	rbTracer := node.AsStart(f.rbTracer.TraceLoop(ctx))
+	mapTracer := node.AsStart(f.mapTracer.TraceLoop(ctx, f.cfg.EnableGC))
+	rbTracer := node.AsStart(f.rbTracer.TraceLoop(ctx, f.cfg.EnableGC))
 	accounter := node.AsMiddle(f.accounter.Account,
 		node.ChannelBufferLen(f.cfg.BuffersLength))
 
diff --git a/pkg/agent/agent_test.go b/pkg/agent/agent_test.go
index a8818ab93c11786afe35c798088cf34748ee12cc..1cf3a1ff5017522ad7744967c604ce9dee3266c4 100644
--- a/pkg/agent/agent_test.go
+++ b/pkg/agent/agent_test.go
@@ -185,8 +185,8 @@ func testAgent(t *testing.T, cfg *Config) *test.ExporterFake {
 	now := uint64(monotime.Now())
 	key1Metrics := ebpf.BpfFlowMetrics{Packets: 3, Bytes: 44, StartMonoTimeTs: now + 1000, EndMonoTimeTs: now + 1_000_000_000}
 
-	ebpfTracer.AppendLookupResults(map[ebpf.BpfFlowId]ebpf.BpfFlowMetrics{
-		key1: key1Metrics,
+	ebpfTracer.AppendLookupResults(map[ebpf.BpfFlowId]*ebpf.BpfFlowMetrics{
+		key1: &key1Metrics,
 	})
 	return export
 }
diff --git a/pkg/agent/config.go b/pkg/agent/config.go
index c9e9d913c6b3136396ac3b85e36440fbe45f9de5..6dfb49b2dc766e60b7e7b0b3643a57703b8d63c0 100644
--- a/pkg/agent/config.go
+++ b/pkg/agent/config.go
@@ -136,4 +136,6 @@ type Config struct {
 	KafkaSASLClientSecretPath string `env:"KAFKA_SASL_CLIENT_SECRET_PATH"`
 	// ProfilePort sets the listening port for Go's Pprof tool. If it is not set, profile is disabled
 	ProfilePort int `env:"PROFILE_PORT"`
+	// EnableGC enables a Go garbage-collection run at the end of every map eviction; default is true
+	EnableGC bool `env:"ENABLE_GARBAGE_COLLECTION" envDefault:"true"`
 }
diff --git a/pkg/ebpf/bpf_bpfeb.o b/pkg/ebpf/bpf_bpfeb.o
index d844353d9a0b367e85237787e1313b1b601b1c01..f3dd6b1ea09dea6751857a74838cac9ded0401a6 100644
Binary files a/pkg/ebpf/bpf_bpfeb.o and b/pkg/ebpf/bpf_bpfeb.o differ
diff --git a/pkg/ebpf/bpf_bpfel.o b/pkg/ebpf/bpf_bpfel.o
index 75c4ad1b1b0fbd9c97adbcf8009bb3811e575f2c..b6095610414e142b585363b87c85ed31fa4fe8f5 100644
Binary files a/pkg/ebpf/bpf_bpfel.o and b/pkg/ebpf/bpf_bpfel.o differ
diff --git a/pkg/ebpf/tracer.go b/pkg/ebpf/tracer.go
index 4efa75bbfabb7808d86e9c8f4b78aee67a42d5e4..2253c1f4b8abec1044437fcb9acfa478840e8054 100644
--- a/pkg/ebpf/tracer.go
+++ b/pkg/ebpf/tracer.go
@@ -7,6 +7,7 @@ import (
 	"strings"
 
 	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/btf"
 	"github.com/cilium/ebpf/ringbuf"
 	"github.com/cilium/ebpf/rlimit"
 	"github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces"
@@ -81,7 +82,12 @@ func NewFlowFetcher(
 		}
 		return nil, fmt.Errorf("loading and assigning BPF objects: %w", err)
 	}
-
+	/*
+	 * Since we load the program only once, at startup, we need to release the
+	 * memory used by the cached kernel BTF; see https://github.com/cilium/ebpf/issues/1063
+	 * for more details.
+	 */
+	btf.FlushKernelSpec()
 	// read events from ingress+egress ringbuffer
 	flows, err := ringbuf.NewReader(objects.DirectFlows)
 	if err != nil {
@@ -124,7 +130,7 @@ func (m *FlowFetcher) Register(iface ifaces.Interface) error {
 		if errors.Is(err, fs.ErrExist) {
 			ilog.WithError(err).Warn("qdisc clsact already exists. Ignoring")
 		} else {
-			return fmt.Errorf("failed to create clsact qdisc on %d (%s): %T %w", iface.Index, iface.Name, err, err)
+			return fmt.Errorf("failed to create clsact qdisc on %d (%s): %w", iface.Index, iface.Name, err)
 		}
 	}
 	m.qdiscs[iface] = qdisc
@@ -133,11 +139,7 @@ func (m *FlowFetcher) Register(iface ifaces.Interface) error {
 		return err
 	}
 
-	if err := m.registerIngress(iface, ipvlan); err != nil {
-		return err
-	}
-
-	return nil
+	return m.registerIngress(iface, ipvlan)
 }
 
 func (m *FlowFetcher) registerEgress(iface ifaces.Interface, ipvlan netlink.Link) error {
@@ -308,14 +310,14 @@ func (m *FlowFetcher) ReadRingBuf() (ringbuf.Record, error) {
 // TODO: detect whether BatchLookupAndDelete is supported (Kernel>=5.6) and use it selectively
 // Supported Lookup/Delete operations by kernel: https://github.com/iovisor/bcc/blob/master/docs/kernel-versions.md
 // Race conditions here cause some flows to be lost in high-load scenarios
-func (m *FlowFetcher) LookupAndDeleteMap() map[BpfFlowId]BpfFlowMetrics {
+func (m *FlowFetcher) LookupAndDeleteMap() map[BpfFlowId]*BpfFlowMetrics {
 	flowMap := m.objects.AggregatedFlows
 
 	iterator := flowMap.Iterate()
-	var flow = make(map[BpfFlowId]BpfFlowMetrics, m.cacheMaxSize)
-
-	id := BpfFlowId{}
+	var flow = make(map[BpfFlowId]*BpfFlowMetrics, m.cacheMaxSize)
+	var id BpfFlowId
 	var metric BpfFlowMetrics
+
 	// Replacing Iterate+Delete with LookupAndDelete would prevent some possible race conditions
 	// TODO: detect whether LookupAndDelete is supported (Kernel>=4.20) and use it selectively
 	for iterator.Next(&id, &metric) {
@@ -323,10 +325,9 @@
 		if err := flowMap.Delete(id); err != nil {
 			log.WithError(err).WithField("flowId", id).
 				Warnf("couldn't delete flow entry")
 		}
-		// We observed that eBFP PerCPU map might insert multiple times the same key in the map
-		// (probably due to race conditions) so we need to re-join metrics again at userspace
-		// TODO: instrument how many times the keys are is repeated in the same eviction
-		flow[id] = metric
+		metricPtr := new(BpfFlowMetrics)
+		*metricPtr = metric
+		flow[id] = metricPtr
 	}
 	return flow
 }
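Note on the btf.FlushKernelSpec() call introduced above: cilium/ebpf caches the parsed kernel BTF in userspace to speed up later program loads, and that cache is pure overhead for an agent that loads its objects exactly once at startup. Below is a minimal sketch of the pattern under that assumption; the object path and loader are placeholders, not this repository's generated bpf2go loader.

package main

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/btf"
)

// loadOnce loads an eBPF collection from an object file a single time and
// then drops the kernel BTF that cilium/ebpf cached while loading it.
func loadOnce(path string) (*ebpf.Collection, error) {
	spec, err := ebpf.LoadCollectionSpec(path)
	if err != nil {
		return nil, err
	}
	coll, err := ebpf.NewCollection(spec)
	if err != nil {
		return nil, err
	}
	// Safe because no further loads are planned; if something does load
	// later, the kernel BTF is simply parsed again on demand.
	btf.FlushKernelSpec()
	return coll, nil
}

func main() {
	coll, err := loadOnce("program.o") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer coll.Close()
}

The flush only empties the cache; programs that are already loaded are unaffected.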
Warnf("couldn't delete flow entry") } - // We observed that eBFP PerCPU map might insert multiple times the same key in the map - // (probably due to race conditions) so we need to re-join metrics again at userspace - // TODO: instrument how many times the keys are is repeated in the same eviction - flow[id] = metric + metricPtr := new(BpfFlowMetrics) + *metricPtr = metric + flow[id] = metricPtr } return flow } diff --git a/pkg/exporter/ipfix.go b/pkg/exporter/ipfix.go index 67f18adce907b26b80c257dd14a1d4a5c333e76b..e3066a75587b13b1cf936d0334c4df533b48d003 100644 --- a/pkg/exporter/ipfix.go +++ b/pkg/exporter/ipfix.go @@ -327,7 +327,7 @@ func setEntities(record *flow.Record, elements *[]entities.InfoElementWithValue) setIERecordValue(record, &ieVal) } } -func (ipf *IPFIX) sendDataRecord(log *logrus.Entry, record *flow.Record, v6 bool) error { +func (ipf *IPFIX) sendDataRecord(_ *logrus.Entry, record *flow.Record, v6 bool) error { dataSet := entities.NewSet(false) var templateID uint16 if v6 { diff --git a/pkg/flow/tracer_map.go b/pkg/flow/tracer_map.go index 563c2850be7bb96b7dcf2a7c904addc72133b548..e067e34e062818527354265513f6ac09d3c6c299 100644 --- a/pkg/flow/tracer_map.go +++ b/pkg/flow/tracer_map.go @@ -2,6 +2,7 @@ package flow import ( "context" + "runtime" "sync" "time" @@ -25,7 +26,7 @@ type MapTracer struct { } type mapFetcher interface { - LookupAndDeleteMap() map[ebpf.BpfFlowId]ebpf.BpfFlowMetrics + LookupAndDeleteMap() map[ebpf.BpfFlowId]*ebpf.BpfFlowMetrics } func NewMapTracer(fetcher mapFetcher, evictionTimeout time.Duration) *MapTracer { @@ -43,10 +44,10 @@ func (m *MapTracer) Flush() { m.evictionCond.Broadcast() } -func (m *MapTracer) TraceLoop(ctx context.Context) node.StartFunc[[]*Record] { +func (m *MapTracer) TraceLoop(ctx context.Context, enableGC bool) node.StartFunc[[]*Record] { return func(out chan<- []*Record) { evictionTicker := time.NewTicker(m.evictionTimeout) - go m.evictionSynchronization(ctx, out) + go m.evictionSynchronization(ctx, enableGC, out) for { select { case <-ctx.Done(): @@ -64,7 +65,7 @@ func (m *MapTracer) TraceLoop(ctx context.Context) node.StartFunc[[]*Record] { // evictionSynchronization loop just waits for the evictionCond to happen // and triggers the actual eviction. It makes sure that only one eviction // is being triggered at the same time -func (m *MapTracer) evictionSynchronization(ctx context.Context, out chan<- []*Record) { +func (m *MapTracer) evictionSynchronization(ctx context.Context, enableGC bool, out chan<- []*Record) { // flow eviction loop. 
It just keeps waiting for eviction until someone triggers the // evictionCond.Broadcast signal for { @@ -77,14 +78,14 @@ func (m *MapTracer) evictionSynchronization(ctx context.Context, out chan<- []*R return default: mtlog.Debug("evictionSynchronization signal received") - m.evictFlows(ctx, out) + m.evictFlows(ctx, enableGC, out) } m.evictionCond.L.Unlock() } } -func (m *MapTracer) evictFlows(ctx context.Context, forwardFlows chan<- []*Record) { +func (m *MapTracer) evictFlows(ctx context.Context, enableGC bool, forwardFlows chan<- []*Record) { // it's important that this monotonic timer reports same or approximate values as kernel-side bpf_ktime_get_ns() monotonicTimeNow := monotime.Now() currentTime := time.Now() @@ -103,7 +104,7 @@ func (m *MapTracer) evictFlows(ctx context.Context, forwardFlows chan<- []*Recor } forwardingFlows = append(forwardingFlows, NewRecord( flowKey, - aggregatedMetrics, + *aggregatedMetrics, currentTime, uint64(monotonicTimeNow), )) @@ -115,5 +116,9 @@ func (m *MapTracer) evictFlows(ctx context.Context, forwardFlows chan<- []*Recor default: forwardFlows <- forwardingFlows } + + if enableGC { + runtime.GC() + } mtlog.Debugf("%d flows evicted", len(forwardingFlows)) } diff --git a/pkg/flow/tracer_ringbuf.go b/pkg/flow/tracer_ringbuf.go index bd6e3791d9652bbb52e3c9342467660520a51b25..eadfcea8347136d9ad97ffcd7d6600ed0ba4a0a1 100644 --- a/pkg/flow/tracer_ringbuf.go +++ b/pkg/flow/tracer_ringbuf.go @@ -5,6 +5,7 @@ import ( "context" "errors" "fmt" + "runtime" "sync/atomic" "syscall" "time" @@ -51,7 +52,7 @@ func NewRingBufTracer( } } -func (m *RingBufTracer) TraceLoop(ctx context.Context) node.StartFunc[*RawRecord] { +func (m *RingBufTracer) TraceLoop(ctx context.Context, enableGC bool) node.StartFunc[*RawRecord] { return func(out chan<- *RawRecord) { debugging := logrus.IsLevelEnabled(logrus.DebugLevel) for { @@ -60,7 +61,7 @@ func (m *RingBufTracer) TraceLoop(ctx context.Context) node.StartFunc[*RawRecord rtlog.Debug("exiting trace loop due to context cancellation") return default: - if err := m.listenAndForwardRingBuffer(debugging, out); err != nil { + if err := m.listenAndForwardRingBuffer(debugging, enableGC, out); err != nil { if errors.Is(err, ringbuf.ErrClosed) { rtlog.Debug("Received signal, exiting..") return @@ -73,7 +74,7 @@ func (m *RingBufTracer) TraceLoop(ctx context.Context) node.StartFunc[*RawRecord } } -func (m *RingBufTracer) listenAndForwardRingBuffer(debugging bool, forwardCh chan<- *RawRecord) error { +func (m *RingBufTracer) listenAndForwardRingBuffer(debugging, enableGC bool, forwardCh chan<- *RawRecord) error { event, err := m.ringBuffer.ReadRingBuf() if err != nil { return fmt.Errorf("reading from ring buffer: %w", err) @@ -95,7 +96,9 @@ func (m *RingBufTracer) listenAndForwardRingBuffer(debugging bool, forwardCh cha // Will need to send it to accounter anyway to account regardless of complete/ongoing flow forwardCh <- readFlow - + if enableGC { + runtime.GC() + } return nil } diff --git a/pkg/test/tracer_fake.go b/pkg/test/tracer_fake.go index 495acfca56bf6a5485594ba8596a6d12eb33f1f3..960de0c6eefbae0d64477cf6987fc1a0970864f0 100644 --- a/pkg/test/tracer_fake.go +++ b/pkg/test/tracer_fake.go @@ -13,14 +13,14 @@ import ( // TracerFake fakes the kernel-side eBPF map structures for testing type TracerFake struct { interfaces map[ifaces.Interface]struct{} - mapLookups chan map[ebpf.BpfFlowId]ebpf.BpfFlowMetrics + mapLookups chan map[ebpf.BpfFlowId]*ebpf.BpfFlowMetrics ringBuf chan ringbuf.Record } func NewTracerFake() *TracerFake { return 
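The enableGC flag threaded through TraceLoop, evictionSynchronization and evictFlows above forces a garbage-collection cycle right after each eviction, when the userspace copy of the flow map and the freshly forwarded record batch have just become unreachable. A minimal sketch of that pattern follows; flowRecord and evictionLoop are illustrative stand-ins, not the agent's real types.

package main

import "runtime"

// flowRecord stands in for the agent's evicted flow records.
type flowRecord struct {
	packets uint64
	bytes   uint64
}

// evictionLoop forwards each evicted batch and, when enableGC is set,
// immediately runs a GC cycle so the memory released by the batch is
// reclaimed now rather than at the next automatic collection.
func evictionLoop(evictions <-chan []*flowRecord, out chan<- []*flowRecord, enableGC bool) {
	for batch := range evictions {
		out <- batch
		if enableGC {
			runtime.GC() // blocks until the collection completes
		}
	}
}

func main() {
	evictions := make(chan []*flowRecord, 1)
	out := make(chan []*flowRecord, 1)
	go evictionLoop(evictions, out, true)

	evictions <- []*flowRecord{{packets: 3, bytes: 44}}
	<-out
	close(evictions)
}

Since runtime.GC() blocks its caller until the cycle finishes, leaving ENABLE_GARBAGE_COLLECTION at its default of true trades a short pause per eviction for more promptly released memory.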
&TracerFake{ interfaces: map[ifaces.Interface]struct{}{}, - mapLookups: make(chan map[ebpf.BpfFlowId]ebpf.BpfFlowMetrics, 100), + mapLookups: make(chan map[ebpf.BpfFlowId]*ebpf.BpfFlowMetrics, 100), ringBuf: make(chan ringbuf.Record, 100), } } @@ -33,12 +33,12 @@ func (m *TracerFake) Register(iface ifaces.Interface) error { return nil } -func (m *TracerFake) LookupAndDeleteMap() map[ebpf.BpfFlowId]ebpf.BpfFlowMetrics { +func (m *TracerFake) LookupAndDeleteMap() map[ebpf.BpfFlowId]*ebpf.BpfFlowMetrics { select { case r := <-m.mapLookups: return r default: - return map[ebpf.BpfFlowId]ebpf.BpfFlowMetrics{} + return map[ebpf.BpfFlowId]*ebpf.BpfFlowMetrics{} } } @@ -46,7 +46,7 @@ func (m *TracerFake) ReadRingBuf() (ringbuf.Record, error) { return <-m.ringBuf, nil } -func (m *TracerFake) AppendLookupResults(results map[ebpf.BpfFlowId]ebpf.BpfFlowMetrics) { +func (m *TracerFake) AppendLookupResults(results map[ebpf.BpfFlowId]*ebpf.BpfFlowMetrics) { m.mapLookups <- results } diff --git a/scripts/update-bpf-headers.sh b/scripts/update-bpf-headers.sh index 38f97d871bf05ed96598e58a9ed1b4d0386a77bc..ce0301b214744ed1251bfef7ae94837bd627e827 100755 --- a/scripts/update-bpf-headers.sh +++ b/scripts/update-bpf-headers.sh @@ -11,6 +11,7 @@ headers=( "$prefix"/src/bpf_helper_defs.h "$prefix"/src/bpf_helpers.h "$prefix"/src/bpf_tracing.h + "$prefix"/src/bpf_core_read.h ) # Fetch libbpf release and extract the desired headers