|
7 | 7 |
|
8 | 8 | #include <core/CLogger.h>
|
9 | 9 |
|
| 10 | +#include <cerrno> |
| 11 | +#include <cstdint> |
| 12 | +#include <cstring> |
| 13 | + |
10 | 14 | #include <linux/audit.h>
|
11 | 15 | #include <linux/filter.h>
|
12 | 16 | #include <sys/prctl.h>
|
13 | 17 | #include <sys/syscall.h>
|
14 | 18 |
|
15 |
| -#include <cerrno> |
16 |
| -#include <cstdint> |
17 |
| -#include <cstring> |
18 |
| - |
19 | 19 | namespace ml {
|
20 | 20 | namespace seccomp {
|
21 | 21 |
|
@@ -45,53 +45,70 @@ const std::uint32_t SECCOMP_DATA_NR_OFFSET = 0x00;
|
45 | 45 | const struct sock_filter FILTER[] = {
|
46 | 46 | // Load the system call number into accumulator
|
47 | 47 | BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SECCOMP_DATA_NR_OFFSET),
|
48 |
| - // Only applies to X86_64 arch. Jump to disallow for calls using the x32 ABI |
49 |
| - BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, UPPER_NR_LIMIT, 42, 0), |
| 48 | + |
| 49 | +#ifdef __x86_64__ |
| 50 | + // Only applies to x86_64 arch. Jump to disallow for calls using the i386 ABI |
| 51 | + BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, UPPER_NR_LIMIT, 46, 0), |
50 | 52 | // If any sys call filters are added or removed then the jump
|
51 | 53 | // destination for each statement including the one above must
|
52 | 54 | // be updated accordingly
|
53 | 55 |
|
54 |
| - // Allowed sys calls, jump to return allow on match |
55 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpriority, 42, 0), // for nice |
56 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_setpriority, 41, 0), // for nice |
57 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_read, 40, 0), |
58 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_write, 39, 0), |
59 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_writev, 38, 0), |
60 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_lseek, 37, 0), |
61 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_lstat, 36, 0), |
62 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_time, 35, 0), |
63 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_clock_gettime, 34, 0), |
64 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_gettimeofday, 33, 0), |
65 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_readlink, 32, 0), |
66 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_stat, 31, 0), |
67 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_fstat, 30, 0), |
68 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_open, 29, 0), |
69 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_close, 28, 0), |
70 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_connect, 27, 0), |
71 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_clone, 26, 0), |
72 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_statfs, 25, 0), |
73 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_dup2, 24, 0), |
74 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_mkdir, 23, 0), // for forecast temp storage |
75 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_rmdir, 22, 0), // for forecast temp storage |
76 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_unlinkat, 21, 0), // for forecast temp storage |
77 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getdents, 20, 0), // for forecast temp storage |
78 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getdents64, 19, 0), // for forecast temp storage |
79 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_openat, 18, 0), // for forecast temp storage |
80 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_tgkill, 17, 0), // for the crash handler |
81 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_rt_sigaction, 16, 0), // for the crash handler |
82 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_rt_sigreturn, 15, 0), |
83 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_futex, 14, 0), |
84 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_madvise, 13, 0), |
85 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_unlink, 12, 0), |
86 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_mknod, 11, 0), |
87 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_nanosleep, 10, 0), |
88 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_set_robust_list, 9, 0), |
89 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_mprotect, 8, 0), |
90 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_munmap, 7, 0), |
91 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_mmap, 6, 0), |
92 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getuid, 5, 0), |
93 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_exit_group, 4, 0), |
94 |
| - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_access, 3, 0), |
| 56 | + // Allowed architecture-specific sys calls, jump to return allow on match |
| 57 | + // Some of these are not used in latest glibc, and not supported in Linux |
| 58 | + // kernels for recent architectures, but in a few cases different sys calls |
| 59 | + // are used on different architectures |
| 60 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_open, 46, 0), |
| 61 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_dup2, 45, 0), |
| 62 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_unlink, 44, 0), |
| 63 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_stat, 43, 0), |
| 64 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_lstat, 42, 0), |
| 65 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_time, 41, 0), |
| 66 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_readlink, 40, 0), |
| 67 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getdents, 39, 0), // for forecast temp storage |
| 68 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_rmdir, 38, 0), // for forecast temp storage |
| 69 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_mkdir, 37, 0), // for forecast temp storage |
| 70 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_mknod, 36, 0), |
| 71 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_access, 35, 0), |
| 72 | +#elif defined(__aarch64__) |
| 73 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_mknodat, 36, 0), |
| 74 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_faccessat, 35, 0), |
| 75 | +#else |
| 76 | +#error Unsupported hardware architecture |
| 77 | +#endif |
| 78 | + |
| 79 | + // Allowed sys calls for all architectures, jump to return allow on match |
| 80 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_newfstatat, 34, 0), |
| 81 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_readlinkat, 33, 0), |
| 82 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_dup3, 32, 0), |
| 83 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpriority, 31, 0), // for nice |
| 84 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_setpriority, 30, 0), // for nice |
| 85 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_read, 29, 0), |
| 86 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_write, 28, 0), |
| 87 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_writev, 27, 0), |
| 88 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_lseek, 26, 0), |
| 89 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_clock_gettime, 25, 0), |
| 90 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_gettimeofday, 24, 0), |
| 91 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_fstat, 23, 0), |
| 92 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_close, 22, 0), |
| 93 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_connect, 21, 0), |
| 94 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_clone, 20, 0), |
| 95 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_statfs, 19, 0), |
| 96 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_mkdirat, 18, 0), // for forecast temp storage |
| 97 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_unlinkat, 17, 0), // for forecast temp storage |
| 98 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getdents64, 16, 0), // for forecast temp storage |
| 99 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_openat, 15, 0), // for forecast temp storage |
| 100 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_tgkill, 14, 0), // for the crash handler |
| 101 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_rt_sigaction, 13, 0), // for the crash handler |
| 102 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_rt_sigreturn, 12, 0), |
| 103 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_futex, 11, 0), |
| 104 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_madvise, 10, 0), |
| 105 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_nanosleep, 9, 0), |
| 106 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_set_robust_list, 8, 0), |
| 107 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_mprotect, 7, 0), |
| 108 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_munmap, 6, 0), |
| 109 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_mmap, 5, 0), |
| 110 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getuid, 4, 0), |
| 111 | + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_exit_group, 3, 0), |
95 | 112 | BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_brk, 2, 0),
|
96 | 113 | BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_exit, 1, 0),
|
97 | 114 | // Disallow call with error code EACCES
|
|