|
12 | 12 | */
|
/*
 * Second argument to __builtin_preserve_field_info(): selects which piece of
 * relocatable field information the compiler should record for CO-RE.
 */
enum bpf_field_info_kind {
	BPF_FIELD_BYTE_OFFSET = 0,	/* field byte offset */
	BPF_FIELD_BYTE_SIZE = 1,	/* field size in bytes */
	BPF_FIELD_EXISTS = 2,		/* field existence in target kernel */
	BPF_FIELD_SIGNED = 3,		/* field signedness (0 - unsigned, 1 - signed) */
	BPF_FIELD_LSHIFT_U64 = 4,	/* bitfield-specific left bitshift */
	BPF_FIELD_RSHIFT_U64 = 5,	/* bitfield-specific right bitshift */
};
|
17 | 21 |
|
/*
 * Shorthand for asking the compiler to record a CO-RE relocation of kind
 * BPF_FIELD_<info> for src->field; the loader patches in the value that
 * matches the target kernel's actual struct layout.
 */
#define __CORE_RELO(src, field, info)					      \
	__builtin_preserve_field_info((src)->field, BPF_FIELD_##info)

#if __BYTE_ORDER == __LITTLE_ENDIAN
/*
 * Read the bitfield's underlying integer storage (BYTE_SIZE bytes at
 * BYTE_OFFSET within *src) into the u64 pointed to by dst. On little-endian,
 * writing at offset 0 of *dst already lands in the low-order bytes, as the
 * LSHIFT_U64/RSHIFT_U64 semantics require.
 */
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld)			      \
	bpf_probe_read((void *)dst,					      \
		       __CORE_RELO(src, fld, BYTE_SIZE),		      \
		       (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#else
/* semantics of LSHIFT_U64 assumes loading values into low-ordered bytes, so
 * for big-endian we need to adjust destination pointer accordingly, based on
 * field byte size
 */
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld)			      \
	bpf_probe_read((void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)), \
		       __CORE_RELO(src, fld, BYTE_SIZE),		      \
		       (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#endif

/*
 * Extract bitfield, identified by src->field, and put its value into u64
 * *res. All this is done in a relocatable manner, so bitfield changes such
 * as signedness, bit size, or offset changes are handled automatically.
 * This version of the macro uses bpf_probe_read() to read the underlying
 * integer storage. The macro functions as an expression and its result is
 * bpf_probe_read()'s return value: 0 on success, <0 on error.
 */
#define BPF_CORE_READ_BITFIELD_PROBED(src, field, res) ({		      \
	unsigned long long val;						      \
									      \
	*res = 0;							      \
	/* val first holds bpf_probe_read()'s error code */		      \
	val = __CORE_BITFIELD_PROBE_READ(res, src, field);		      \
	if (!val) {							      \
		/* push the bitfield's bits to the top of the u64, then	      \
		 * shift them back down, sign-extending if field is signed    \
		 */							      \
		*res <<= __CORE_RELO(src, field, LSHIFT_U64);		      \
		val = __CORE_RELO(src, field, RSHIFT_U64);		      \
		if (__CORE_RELO(src, field, SIGNED))			      \
			*res = ((long long)*res) >> val;		      \
		else							      \
			*res = ((unsigned long long)*res) >> val;	      \
		val = 0;	/* report success to the caller */	      \
	}								      \
	val;								      \
})
| 65 | + |
/*
 * Extract bitfield, identified by s->field, and return its value as u64.
 * All this is done in a relocatable manner, so bitfield changes such as
 * signedness, bit size, or offset changes are handled automatically.
 * This version of the macro uses direct memory reads and should be used from
 * BPF program types that support such functionality (e.g., typed raw
 * tracepoints).
 */
#define BPF_CORE_READ_BITFIELD(s, field) ({				      \
	const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
	/* zero-init: if BYTE_SIZE matches no case, val stays defined */      \
	unsigned long long val = 0;					      \
									      \
	/* read the bitfield's underlying integer storage; each case must     \
	 * break, otherwise a narrow read would fall through and get	      \
	 * clobbered by wider (and potentially out-of-bounds) reads	      \
	 */								      \
	switch (__CORE_RELO(s, field, BYTE_SIZE)) {			      \
	case 1: val = *(const unsigned char *)p; break;			      \
	case 2: val = *(const unsigned short *)p; break;		      \
	case 4: val = *(const unsigned int *)p; break;			      \
	case 8: val = *(const unsigned long long *)p; break;		      \
	}								      \
	/* push the bitfield's bits to the top of the u64, then shift them    \
	 * back down, sign-extending if the field is signed		      \
	 */								      \
	val <<= __CORE_RELO(s, field, LSHIFT_U64);			      \
	if (__CORE_RELO(s, field, SIGNED))				      \
		val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64);  \
	else								      \
		val = val >> __CORE_RELO(s, field, RSHIFT_U64);		      \
	val;								      \
})
| 89 | + |
18 | 90 | /*
|
19 | 91 | * Convenience macro to check that the field actually exists in the target kernel.
|
20 | 92 | * Returns:
|
|
0 commit comments