// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include <time.h>

#include "struct_ops_module.skel.h"
#include "struct_ops_nulled_out_cb.skel.h"
#include "struct_ops_forgotten_cb.skel.h"

static void check_map_info(struct bpf_map_info *info)
{
	struct bpf_btf_info btf_info;
	char btf_name[256];
	u32 btf_info_len = sizeof(btf_info);
	int err, fd;

	fd = bpf_btf_get_fd_by_id(info->btf_vmlinux_id);
	if (!ASSERT_GE(fd, 0, "get_value_type_btf_obj_fd"))
		return;

	memset(&btf_info, 0, sizeof(btf_info));
	btf_info.name = ptr_to_u64(btf_name);
	btf_info.name_len = sizeof(btf_name);
	err = bpf_btf_get_info_by_fd(fd, &btf_info, &btf_info_len);
	if (!ASSERT_OK(err, "get_value_type_btf_obj_info"))
		goto cleanup;

	if (!ASSERT_EQ(strcmp(btf_name, "bpf_testmod"), 0, "get_value_type_btf_obj_name"))
		goto cleanup;

cleanup:
	close(fd);
}

static int attach_ops_and_check(struct struct_ops_module *skel,
				struct bpf_map *map,
				int expected_test_2_result)
{
	struct bpf_link *link;

	link = bpf_map__attach_struct_ops(map);
	ASSERT_OK_PTR(link, "attach_test_mod_1");
	if (!link)
		return -1;

	/* test_{1,2}() would be called from bpf_dummy_reg() in bpf_testmod.c */
	ASSERT_EQ(skel->bss->test_1_result, 0xdeadbeef, "test_1_result");
	ASSERT_EQ(skel->bss->test_2_result, expected_test_2_result, "test_2_result");

	bpf_link__destroy(link);
	return 0;
}

static void test_struct_ops_load(void)
{
	struct struct_ops_module *skel;
	struct bpf_map_info info = {};
	int err;
	u32 len;

	skel = struct_ops_module__open();
	if (!ASSERT_OK_PTR(skel, "struct_ops_module_open"))
		return;

	skel->struct_ops.testmod_1->data = 13;
	skel->struct_ops.testmod_1->test_2 = skel->progs.test_3;
	/* Since test_2() is not being used, it should be disabled from
	 * auto-loading, or it will fail to load.
	 */
	bpf_program__set_autoload(skel->progs.test_2, false);
	bpf_map__set_autocreate(skel->maps.testmod_zeroed, false);

	err = struct_ops_module__load(skel);
	if (!ASSERT_OK(err, "struct_ops_module_load"))
		goto cleanup;

	len = sizeof(info);
	err = bpf_map_get_info_by_fd(bpf_map__fd(skel->maps.testmod_1), &info,
				     &len);
	if (!ASSERT_OK(err, "bpf_map_get_info_by_fd"))
		goto cleanup;

	check_map_info(&info);
	/* test_3() will be called from bpf_dummy_reg() in bpf_testmod.c
	 *
	 * In bpf_testmod.c it will pass 4 and 13 (the value of data) to
	 * .test_2. So, the value of test_2_result should be 20 (4 + 13 +
	 * 3).
	 */
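	/* Rough sketch of where those numbers come from (illustrative, not
	 * verbatim module source): bpf_dummy_reg() in bpf_testmod.c does
	 * roughly
	 *
	 *	if (ops->test_2)
	 *		ops->test_2(4, ops->data);
	 *
	 * and, since testmod_1->test_2 was pointed at test_3() above, the
	 * extra 3 is assumed to come from test_3() adding 3 to the sum of
	 * its two arguments, hence 4 + 13 + 3 = 20 for testmod_1.
	 */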
	if (attach_ops_and_check(skel, skel->maps.testmod_1, 20))
		goto cleanup;
	if (attach_ops_and_check(skel, skel->maps.testmod_2, 12))
		goto cleanup;

cleanup:
	struct_ops_module__destroy(skel);
}

static void test_struct_ops_not_zeroed(void)
{
	struct struct_ops_module *skel;
	int err;

	/* zeroed is 0, and zeroed_op is null */
	skel = struct_ops_module__open();
	if (!ASSERT_OK_PTR(skel, "struct_ops_module_open"))
		return;

	skel->struct_ops.testmod_zeroed->zeroed = 0;
	/* zeroed_op prog should not be loaded automatically now */
	skel->struct_ops.testmod_zeroed->zeroed_op = NULL;

	err = struct_ops_module__load(skel);
	ASSERT_OK(err, "struct_ops_module_load");

	struct_ops_module__destroy(skel);

	/* zeroed is not 0 */
	skel = struct_ops_module__open();
	if (!ASSERT_OK_PTR(skel, "struct_ops_module_open_not_zeroed"))
		return;

	/* libbpf should reject the testmod_zeroed since struct
	 * bpf_testmod_ops in the kernel has no "zeroed" field and the
	 * value of "zeroed" is non-zero.
	 */
	skel->struct_ops.testmod_zeroed->zeroed = 0xdeadbeef;
	skel->struct_ops.testmod_zeroed->zeroed_op = NULL;
	err = struct_ops_module__load(skel);
	ASSERT_ERR(err, "struct_ops_module_load_not_zeroed");

	struct_ops_module__destroy(skel);

	/* zeroed_op is not null */
	skel = struct_ops_module__open();
	if (!ASSERT_OK_PTR(skel, "struct_ops_module_open_not_zeroed_op"))
		return;

	/* libbpf should reject the testmod_zeroed since the value of its
	 * "zeroed_op" is not null.
	 */
	skel->struct_ops.testmod_zeroed->zeroed_op = skel->progs.test_3;
	err = struct_ops_module__load(skel);
	ASSERT_ERR(err, "struct_ops_module_load_not_zeroed_op");

	struct_ops_module__destroy(skel);
}

/* The signature of an implementation might not match the signature of the
 * function pointer prototype defined in the BPF program. This mismatch
 * should be allowed as long as the behavior of the struct_ops program
 * adheres to the signature in the kernel. Libbpf should not enforce the
 * signature; rather, let the kernel verifier handle the enforcement.
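 *
 * As a purely illustrative example (the concrete types live in
 * bpf_testmod.h and struct_ops_module.c, not here): the kernel might
 * declare the callback as
 *
 *	int (*test_2)(int a, int b);
 *
 * while the BPF object defines its local variant of the ops struct with
 * a different prototype, e.g.
 *
 *	void (*test_2)(int a);
 *
 * Loading and attaching such a map should still succeed; any real
 * incompatibility is for the kernel verifier to report.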
 */
static void test_struct_ops_incompatible(void)
{
	struct struct_ops_module *skel;
	struct bpf_link *link;
	int err;

	skel = struct_ops_module__open();
	if (!ASSERT_OK_PTR(skel, "struct_ops_module_open"))
		return;

	bpf_map__set_autocreate(skel->maps.testmod_zeroed, false);

	err = struct_ops_module__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

	link = bpf_map__attach_struct_ops(skel->maps.testmod_incompatible);
	if (ASSERT_OK_PTR(link, "attach_struct_ops"))
		bpf_link__destroy(link);

cleanup:
	struct_ops_module__destroy(skel);
}

/* validate that it's ok to "turn off" a callback that the kernel supports */
static void test_struct_ops_nulled_out_cb(void)
{
	struct struct_ops_nulled_out_cb *skel;
	int err;

	skel = struct_ops_nulled_out_cb__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	/* kernel knows about test_1, but we still null it out */
	skel->struct_ops.ops->test_1 = NULL;

	err = struct_ops_nulled_out_cb__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

	ASSERT_FALSE(bpf_program__autoload(skel->progs.test_1_turn_off), "prog_autoload");
	ASSERT_LT(bpf_program__fd(skel->progs.test_1_turn_off), 0, "prog_fd");

cleanup:
	struct_ops_nulled_out_cb__destroy(skel);
}

/* validate that libbpf generates a reasonable error message if a
 * struct_ops program is not referenced in any struct_ops map
 */
static void test_struct_ops_forgotten_cb(void)
{
	struct struct_ops_forgotten_cb *skel;
	char *log;
	int err;

	skel = struct_ops_forgotten_cb__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	start_libbpf_log_capture();

	err = struct_ops_forgotten_cb__load(skel);
	if (!ASSERT_ERR(err, "skel_load"))
		goto cleanup;

	log = stop_libbpf_log_capture();
	ASSERT_HAS_SUBSTR(log,
			  "prog 'test_1_forgotten': SEC(\"struct_ops\") program isn't referenced anywhere, did you forget to use it?",
			  "libbpf_log");
	free(log);

	struct_ops_forgotten_cb__destroy(skel);

	/* now let's programmatically use it, we should be fine now */
	skel = struct_ops_forgotten_cb__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	skel->struct_ops.ops->test_1 = skel->progs.test_1_forgotten; /* not anymore */

	err = struct_ops_forgotten_cb__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

cleanup:
	struct_ops_forgotten_cb__destroy(skel);
}

void serial_test_struct_ops_module(void)
{
	if (test__start_subtest("struct_ops_load"))
		test_struct_ops_load();
	if (test__start_subtest("struct_ops_not_zeroed"))
		test_struct_ops_not_zeroed();
	if (test__start_subtest("struct_ops_incompatible"))
		test_struct_ops_incompatible();
	if (test__start_subtest("struct_ops_null_out_cb"))
		test_struct_ops_nulled_out_cb();
	if (test__start_subtest("struct_ops_forgotten_cb"))
		test_struct_ops_forgotten_cb();
}