// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include <time.h>

#include <sys/epoll.h>

#include "struct_ops_module.skel.h"
#include "struct_ops_nulled_out_cb.skel.h"
#include "struct_ops_forgotten_cb.skel.h"
#include "struct_ops_detach.skel.h"

static void check_map_info(struct bpf_map_info *info)
{
	struct bpf_btf_info btf_info;
	char btf_name[256];
	u32 btf_info_len = sizeof(btf_info);
	int err, fd;

	fd = bpf_btf_get_fd_by_id(info->btf_vmlinux_id);
	if (!ASSERT_GE(fd, 0, "get_value_type_btf_obj_fd"))
		return;

	memset(&btf_info, 0, sizeof(btf_info));
	btf_info.name = ptr_to_u64(btf_name);
	btf_info.name_len = sizeof(btf_name);
	err = bpf_btf_get_info_by_fd(fd, &btf_info, &btf_info_len);
	if (!ASSERT_OK(err, "get_value_type_btf_obj_info"))
		goto cleanup;

	if (!ASSERT_EQ(strcmp(btf_name, "bpf_testmod"), 0, "get_value_type_btf_obj_name"))
		goto cleanup;

cleanup:
	close(fd);
}

static int attach_ops_and_check(struct struct_ops_module *skel,
				struct bpf_map *map,
				int expected_test_2_result)
{
	struct bpf_link *link;

	link = bpf_map__attach_struct_ops(map);
	ASSERT_OK_PTR(link, "attach_test_mod_1");
	if (!link)
		return -1;

	/* test_{1,2}() would be called from bpf_dummy_reg() in bpf_testmod.c */
	ASSERT_EQ(skel->bss->test_1_result, 0xdeadbeef, "test_1_result");
	ASSERT_EQ(skel->bss->test_2_result, expected_test_2_result, "test_2_result");

	bpf_link__destroy(link);
	return 0;
}

static void test_struct_ops_load(void)
{
	struct struct_ops_module *skel;
	struct bpf_map_info info = {};
	int err;
	u32 len;

	skel = struct_ops_module__open();
	if (!ASSERT_OK_PTR(skel, "struct_ops_module_open"))
		return;

	skel->struct_ops.testmod_1->data = 13;
	skel->struct_ops.testmod_1->test_2 = skel->progs.test_3;
	/* Since test_2() is not being used, it should be disabled from
	 * auto-loading, or it will fail to load.
	 */
	bpf_program__set_autoload(skel->progs.test_2, false);
	bpf_map__set_autocreate(skel->maps.testmod_zeroed, false);

	err = struct_ops_module__load(skel);
	if (!ASSERT_OK(err, "struct_ops_module_load"))
		goto cleanup;

	len = sizeof(info);
	err = bpf_map_get_info_by_fd(bpf_map__fd(skel->maps.testmod_1), &info,
				     &len);
	if (!ASSERT_OK(err, "bpf_map_get_info_by_fd"))
		goto cleanup;

	check_map_info(&info);
	/* test_3() will be called from bpf_dummy_reg() in bpf_testmod.c.
	 *
	 * In bpf_testmod.c it will pass 4 and 13 (the value of data) to
	 * .test_2. So, the value of test_2_result should be 20 (4 + 13 +
	 * 3).
	 */
	if (attach_ops_and_check(skel, skel->maps.testmod_1, 20))
		goto cleanup;
	if (attach_ops_and_check(skel, skel->maps.testmod_2, 12))
		goto cleanup;

cleanup:
	struct_ops_module__destroy(skel);
}
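/* For context: the skeleton's "shadow" struct written to above
 * (skel->struct_ops.testmod_1) mirrors the struct_ops map declared on
 * the BPF side in progs/struct_ops_module.c. A rough sketch of such a
 * declaration is below; the exact initializers there may differ, so
 * treat the values as illustrative only:
 *
 *	SEC(".struct_ops.link")
 *	struct bpf_testmod_ops testmod_1 = {
 *		.test_1 = (void *)test_1,
 *		.test_2 = (void *)test_2,
 *		.data	= 0x1,
 *	};
 *
 * Fields assigned through the shadow struct before load (data and
 * test_2 above) override these BPF-side initial values.
 */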
static void test_struct_ops_not_zeroed(void)
{
	struct struct_ops_module *skel;
	int err;

	/* zeroed is 0, and zeroed_op is null */
	skel = struct_ops_module__open();
	if (!ASSERT_OK_PTR(skel, "struct_ops_module_open"))
		return;

	skel->struct_ops.testmod_zeroed->zeroed = 0;
	/* zeroed_op prog should not be loaded automatically now */
	skel->struct_ops.testmod_zeroed->zeroed_op = NULL;

	err = struct_ops_module__load(skel);
	ASSERT_OK(err, "struct_ops_module_load");

	struct_ops_module__destroy(skel);

	/* zeroed is not 0 */
	skel = struct_ops_module__open();
	if (!ASSERT_OK_PTR(skel, "struct_ops_module_open_not_zeroed"))
		return;

	/* libbpf should reject testmod_zeroed since struct
	 * bpf_testmod_ops in the kernel has no "zeroed" field and the
	 * value of "zeroed" is non-zero.
	 */
	skel->struct_ops.testmod_zeroed->zeroed = 0xdeadbeef;
	skel->struct_ops.testmod_zeroed->zeroed_op = NULL;
	err = struct_ops_module__load(skel);
	ASSERT_ERR(err, "struct_ops_module_load_not_zeroed");

	struct_ops_module__destroy(skel);

	/* zeroed_op is not null */
	skel = struct_ops_module__open();
	if (!ASSERT_OK_PTR(skel, "struct_ops_module_open_not_zeroed_op"))
		return;

	/* libbpf should reject testmod_zeroed since the value of its
	 * "zeroed_op" is not null.
	 */
	skel->struct_ops.testmod_zeroed->zeroed_op = skel->progs.test_3;
	err = struct_ops_module__load(skel);
	ASSERT_ERR(err, "struct_ops_module_load_not_zeroed_op");

	struct_ops_module__destroy(skel);
}
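/* Note on the "not zeroed" checks above: libbpf tolerates fields that
 * exist in the BPF program's copy of a struct_ops type but not in the
 * kernel's copy, as long as they are zero/NULL; this lets BPF-side type
 * definitions be newer than the running kernel. A minimal sketch of
 * such a BPF-side definition (hypothetical layout and signatures; only
 * the point that the kernel knows neither field matters here):
 *
 *	struct bpf_testmod_ops {
 *		int (*test_1)(void);
 *		...
 *		int zeroed;		// absent from the kernel's copy
 *		void (*zeroed_op)(int);	// absent from the kernel's copy
 *	};
 *
 * Any non-zero/non-NULL value in such a field, as set above, must fail
 * the load.
 */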
/* The signature of an implementation might not match the signature of the
 * function pointer prototype defined in the BPF program. This mismatch
 * should be allowed as long as the behavior of the operator program
 * adheres to the signature in the kernel. Libbpf should not enforce the
 * signature; rather, let the kernel verifier handle the enforcement.
 */
static void test_struct_ops_incompatible(void)
{
	struct struct_ops_module *skel;
	struct bpf_link *link;
	int err;

	skel = struct_ops_module__open();
	if (!ASSERT_OK_PTR(skel, "struct_ops_module_open"))
		return;

	bpf_map__set_autocreate(skel->maps.testmod_zeroed, false);

	err = struct_ops_module__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

	link = bpf_map__attach_struct_ops(skel->maps.testmod_incompatible);
	if (ASSERT_OK_PTR(link, "attach_struct_ops"))
		bpf_link__destroy(link);

cleanup:
	struct_ops_module__destroy(skel);
}

/* validate that it's ok to "turn off" a callback that the kernel supports */
static void test_struct_ops_nulled_out_cb(void)
{
	struct struct_ops_nulled_out_cb *skel;
	int err;

	skel = struct_ops_nulled_out_cb__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	/* kernel knows about test_1, but we still null it out */
	skel->struct_ops.ops->test_1 = NULL;

	err = struct_ops_nulled_out_cb__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

	ASSERT_FALSE(bpf_program__autoload(skel->progs.test_1_turn_off), "prog_autoload");
	ASSERT_LT(bpf_program__fd(skel->progs.test_1_turn_off), 0, "prog_fd");

cleanup:
	struct_ops_nulled_out_cb__destroy(skel);
}

/* validate that libbpf generates a reasonable error message if a
 * struct_ops program is not referenced in any struct_ops map
 */
static void test_struct_ops_forgotten_cb(void)
{
	struct struct_ops_forgotten_cb *skel;
	char *log;
	int err;

	skel = struct_ops_forgotten_cb__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	start_libbpf_log_capture();

	err = struct_ops_forgotten_cb__load(skel);
	if (!ASSERT_ERR(err, "skel_load"))
		goto cleanup;

	log = stop_libbpf_log_capture();
	ASSERT_HAS_SUBSTR(log,
			  "prog 'test_1_forgotten': SEC(\"struct_ops\") program isn't referenced anywhere, did you forget to use it?",
			  "libbpf_log");
	free(log);

	struct_ops_forgotten_cb__destroy(skel);

	/* now let's programmatically use it, we should be fine now */
	skel = struct_ops_forgotten_cb__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	skel->struct_ops.ops->test_1 = skel->progs.test_1_forgotten; /* not anymore */

	err = struct_ops_forgotten_cb__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

cleanup:
	struct_ops_forgotten_cb__destroy(skel);
}
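/* When a struct_ops link is severed with bpf_link__detach(), the kernel
 * unregisters the struct_ops and reports (E)POLLHUP on the link fd; the
 * next test waits for that via epoll. A poll()-based sketch of the same
 * wait (requires <poll.h>) might look like:
 *
 *	struct pollfd pfd = { .fd = bpf_link__fd(link), .events = POLLHUP };
 *
 *	if (poll(&pfd, 1, 500) == 1 && (pfd.revents & POLLHUP))
 *		... link was detached ...
 *
 * POLLHUP is reported regardless of the .events mask, so this works
 * even with .events = 0.
 */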
/* Detach a link from a user space program */
static void test_detach_link(void)
{
	struct epoll_event ev, events[2];
	struct struct_ops_detach *skel;
	struct bpf_link *link = NULL;
	int fd, epollfd = -1, nfds;
	int err;

	skel = struct_ops_detach__open_and_load();
	if (!ASSERT_OK_PTR(skel, "struct_ops_detach__open_and_load"))
		return;

	link = bpf_map__attach_struct_ops(skel->maps.testmod_do_detach);
	if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
		goto cleanup;

	fd = bpf_link__fd(link);
	if (!ASSERT_GE(fd, 0, "link_fd"))
		goto cleanup;

	epollfd = epoll_create1(0);
	if (!ASSERT_GE(epollfd, 0, "epoll_create1"))
		goto cleanup;

	ev.events = EPOLLHUP;
	ev.data.fd = fd;
	err = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &ev);
	if (!ASSERT_OK(err, "epoll_ctl"))
		goto cleanup;

	err = bpf_link__detach(link);
	if (!ASSERT_OK(err, "detach_link"))
		goto cleanup;

	/* Wait for EPOLLHUP */
	nfds = epoll_wait(epollfd, events, 2, 500);
	if (!ASSERT_EQ(nfds, 1, "epoll_wait"))
		goto cleanup;

	if (!ASSERT_EQ(events[0].data.fd, fd, "epoll_wait_fd"))
		goto cleanup;
	if (!ASSERT_TRUE(events[0].events & EPOLLHUP, "events[0].events"))
		goto cleanup;

cleanup:
	if (epollfd >= 0)
		close(epollfd);
	bpf_link__destroy(link);
	struct_ops_detach__destroy(skel);
}

void serial_test_struct_ops_module(void)
{
	if (test__start_subtest("struct_ops_load"))
		test_struct_ops_load();
	if (test__start_subtest("struct_ops_not_zeroed"))
		test_struct_ops_not_zeroed();
	if (test__start_subtest("struct_ops_incompatible"))
		test_struct_ops_incompatible();
	if (test__start_subtest("struct_ops_null_out_cb"))
		test_struct_ops_nulled_out_cb();
	if (test__start_subtest("struct_ops_forgotten_cb"))
		test_struct_ops_forgotten_cb();
	if (test__start_subtest("test_detach_link"))
		test_detach_link();
}
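/* These subtests all poke shared state in bpf_testmod, which is why the
 * entry point carries the serial_test_ prefix: test_progs runs such
 * tests serially rather than in parallel with others. To run only this
 * group (these tests depend on bpf_testmod), something like:
 *
 *	./test_progs -t struct_ops_module
 */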