// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <linux/seq_file.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/btf_ids.h>
#include <linux/rcupdate_wait.h>
#include <linux/poll.h>

struct bpf_struct_ops_value {
	struct bpf_struct_ops_common_value common;
	char data[] ____cacheline_aligned_in_smp;
};

#define MAX_TRAMP_IMAGE_PAGES 8

struct bpf_struct_ops_map {
	struct bpf_map map;
	const struct bpf_struct_ops_desc *st_ops_desc;
	/* protect map_update */
	struct mutex lock;
	/* links holds all the bpf_links that are populated
	 * to the func ptrs of the kernel's struct
	 * (in kvalue.data).
	 */
	struct bpf_link **links;
	/* ksyms for bpf trampolines */
	struct bpf_ksym **ksyms;
	u32 funcs_cnt;
	u32 image_pages_cnt;
	/* image_pages is an array of pages that hold all the trampolines
	 * that store the func args before calling the bpf_prog.
	 */
	void *image_pages[MAX_TRAMP_IMAGE_PAGES];
	/* The owner module's btf. */
	struct btf *btf;
	/* uvalue->data stores the kernel struct
	 * (e.g. tcp_congestion_ops) that is more useful
	 * to userspace than the kvalue.  For example,
	 * the bpf_prog's id is stored instead of the kernel
	 * address of a func ptr.
	 */
	struct bpf_struct_ops_value *uvalue;
	/* kvalue.data stores the actual kernel's struct
	 * (e.g. tcp_congestion_ops) that will be
	 * registered to the kernel subsystem.
	 */
	struct bpf_struct_ops_value kvalue;
};

struct bpf_struct_ops_link {
	struct bpf_link link;
	struct bpf_map __rcu *map;
	wait_queue_head_t wait_hup;
};

static DEFINE_MUTEX(update_mutex);

#define VALUE_PREFIX "bpf_struct_ops_"
#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)

const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
};

const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_struct_ops_test_run,
#endif
};

BTF_ID_LIST(st_ops_ids)
BTF_ID(struct, module)
BTF_ID(struct, bpf_struct_ops_common_value)

enum {
	IDX_MODULE_ID,
	IDX_ST_OPS_COMMON_VALUE_ID,
};

extern struct btf *btf_vmlinux;

static bool is_valid_value_type(struct btf *btf, s32 value_id,
				const struct btf_type *type,
				const char *value_name)
{
	const struct btf_type *common_value_type;
	const struct btf_member *member;
	const struct btf_type *vt, *mt;

	vt = btf_type_by_id(btf, value_id);
	if (btf_vlen(vt) != 2) {
		pr_warn("The number of %s's members should be 2, but we get %d\n",
			value_name, btf_vlen(vt));
		return false;
	}
	member = btf_type_member(vt);
	mt = btf_type_by_id(btf, member->type);
	common_value_type = btf_type_by_id(btf_vmlinux,
					   st_ops_ids[IDX_ST_OPS_COMMON_VALUE_ID]);
	if (mt != common_value_type) {
		pr_warn("The first member of %s should be bpf_struct_ops_common_value\n",
			value_name);
		return false;
	}
	member++;
	mt = btf_type_by_id(btf, member->type);
	if (mt != type) {
		pr_warn("The second member of %s should be %s\n",
			value_name, btf_name_by_offset(btf, type->name_off));
		return false;
	}

	return true;
}

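/* Illustrative example (no particular subsystem implied): for a struct_ops
 * type named "bpf_dummy_ops", the expected value type looks like
 *
 *	struct bpf_struct_ops_bpf_dummy_ops {
 *		struct bpf_struct_ops_common_value common;
 *		struct bpf_dummy_ops data ____cacheline_aligned_in_smp;
 *	};
 *
 * i.e. exactly two members, the common header followed by the wrapped
 * struct, which is what is_valid_value_type() above verifies.
 */
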
static void *bpf_struct_ops_image_alloc(void)
{
	void *image;
	int err;

	err = bpf_jit_charge_modmem(PAGE_SIZE);
	if (err)
		return ERR_PTR(err);
	image = arch_alloc_bpf_trampoline(PAGE_SIZE);
	if (!image) {
		bpf_jit_uncharge_modmem(PAGE_SIZE);
		return ERR_PTR(-ENOMEM);
	}

	return image;
}

void bpf_struct_ops_image_free(void *image)
{
	if (image) {
		arch_free_bpf_trampoline(image, PAGE_SIZE);
		bpf_jit_uncharge_modmem(PAGE_SIZE);
	}
}

#define MAYBE_NULL_SUFFIX "__nullable"

/* Prepare argument info for every nullable argument of a member of a
 * struct_ops type.
 *
 * Initialize a struct bpf_struct_ops_arg_info according to type info of
 * the arguments of a stub function. (Check kCFI for more information about
 * stub functions.)
 *
 * Each member in the struct_ops type has a struct bpf_struct_ops_arg_info
 * to provide an array of struct bpf_ctx_arg_aux, which in turn provides
 * the information used by the verifier to check the arguments of the
 * BPF struct_ops program assigned to the member. Here, we only care about
 * the arguments that are marked as __nullable.
 *
 * The array of struct bpf_ctx_arg_aux is eventually assigned to
 * prog->aux->ctx_arg_info of BPF struct_ops programs and passed to the
 * verifier. (See check_struct_ops_btf_id())
 *
 * arg_info->info will be the list of struct bpf_ctx_arg_aux on success. On
 * failure, it is left untouched.
 */
static int prepare_arg_info(struct btf *btf,
			    const char *st_ops_name,
			    const char *member_name,
			    const struct btf_type *func_proto, void *stub_func_addr,
			    struct bpf_struct_ops_arg_info *arg_info)
{
	const struct btf_type *stub_func_proto, *pointed_type;
	const struct btf_param *stub_args, *args;
	struct bpf_ctx_arg_aux *info, *info_buf;
	u32 nargs, arg_no, info_cnt = 0;
	char ksym[KSYM_SYMBOL_LEN];
	const char *stub_fname;
	s32 stub_func_id;
	u32 arg_btf_id;
	int offset;

	stub_fname = kallsyms_lookup((unsigned long)stub_func_addr, NULL, NULL, NULL, ksym);
	if (!stub_fname) {
		pr_warn("Cannot find the stub function name for the %s in struct %s\n",
			member_name, st_ops_name);
		return -ENOENT;
	}

	stub_func_id = btf_find_by_name_kind(btf, stub_fname, BTF_KIND_FUNC);
	if (stub_func_id < 0) {
		pr_warn("Cannot find the stub function %s in btf\n", stub_fname);
		return -ENOENT;
	}

	stub_func_proto = btf_type_by_id(btf, stub_func_id);
	stub_func_proto = btf_type_by_id(btf, stub_func_proto->type);

	/* Check if the number of arguments of the stub function is the same
	 * as the number of arguments of the function pointer.
	 */
	nargs = btf_type_vlen(func_proto);
	if (nargs != btf_type_vlen(stub_func_proto)) {
		pr_warn("the number of arguments of the stub function %s does not match the number of arguments of the member %s of struct %s\n",
			stub_fname, member_name, st_ops_name);
		return -EINVAL;
	}

	if (!nargs)
		return 0;

	args = btf_params(func_proto);
	stub_args = btf_params(stub_func_proto);

	info_buf = kcalloc(nargs, sizeof(*info_buf), GFP_KERNEL);
	if (!info_buf)
		return -ENOMEM;

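	/* Illustrative example (hypothetical member and stub): for a member
	 *	int (*fn)(struct sock *sk, struct sk_buff *skb);
	 * the subsystem's stub can be declared as
	 *	int fn_stub(struct sock *sk, struct sk_buff *skb__nullable);
	 * The loop below then emits a bpf_ctx_arg_aux for "skb" with
	 * PTR_MAYBE_NULL set, so the verifier requires the struct_ops
	 * program to NULL-check it before dereferencing.
	 */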
	/* Prepare info for every nullable argument */
	info = info_buf;
	for (arg_no = 0; arg_no < nargs; arg_no++) {
		/* Skip arguments that are not suffixed with
		 * "__nullable".
		 */
		if (!btf_param_match_suffix(btf, &stub_args[arg_no],
					    MAYBE_NULL_SUFFIX))
			continue;

		/* Should be a pointer to struct */
		pointed_type = btf_type_resolve_ptr(btf,
						    args[arg_no].type,
						    &arg_btf_id);
		if (!pointed_type ||
		    !btf_type_is_struct(pointed_type)) {
			pr_warn("stub function %s has %s tagging to an unsupported type\n",
				stub_fname, MAYBE_NULL_SUFFIX);
			goto err_out;
		}

		offset = btf_ctx_arg_offset(btf, func_proto, arg_no);
		if (offset < 0) {
			pr_warn("stub function %s has an invalid trampoline ctx offset for arg#%u\n",
				stub_fname, arg_no);
			goto err_out;
		}

		if (args[arg_no].type != stub_args[arg_no].type) {
			pr_warn("arg#%u type in stub function %s does not match with its original func_proto\n",
				arg_no, stub_fname);
			goto err_out;
		}

		/* Fill the information of the new argument */
		info->reg_type =
			PTR_TRUSTED | PTR_TO_BTF_ID | PTR_MAYBE_NULL;
		info->btf_id = arg_btf_id;
		info->btf = btf;
		info->offset = offset;

		info++;
		info_cnt++;
	}

	if (info_cnt) {
		arg_info->info = info_buf;
		arg_info->cnt = info_cnt;
	} else {
		kfree(info_buf);
	}

	return 0;

err_out:
	kfree(info_buf);

	return -EINVAL;
}

/* Clean up the arg_info in a struct bpf_struct_ops_desc. */
void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
{
	struct bpf_struct_ops_arg_info *arg_info;
	int i;

	arg_info = st_ops_desc->arg_info;
	for (i = 0; i < btf_type_vlen(st_ops_desc->type); i++)
		kfree(arg_info[i].info);

	kfree(arg_info);
}

static bool is_module_member(const struct btf *btf, u32 id)
{
	const struct btf_type *t;

	t = btf_type_resolve_ptr(btf, id, NULL);
	if (!t)
		return false;

	if (!__btf_type_is_struct(t) && !btf_type_is_fwd(t))
		return false;

	return !strcmp(btf_name_by_offset(btf, t->name_off), "module");
}

int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff)
{
	void *func_ptr = *(void **)(st_ops->cfi_stubs + moff);

	return func_ptr ? 0 : -ENOTSUPP;
}

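/* Build the bpf_struct_ops_desc for one struct_ops type: resolve the struct
 * and its "bpf_struct_ops_<name>" value type in BTF, distill a func_model
 * for every supported function-pointer member, and collect the __nullable
 * argument info that the verifier will use for the attached programs.
 */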
int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
			     struct btf *btf,
			     struct bpf_verifier_log *log)
{
	struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
	struct bpf_struct_ops_arg_info *arg_info;
	const struct btf_member *member;
	const struct btf_type *t;
	s32 type_id, value_id;
	char value_name[128];
	const char *mname;
	int i, err;

	if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
	    sizeof(value_name)) {
		pr_warn("struct_ops name %s is too long\n",
			st_ops->name);
		return -EINVAL;
	}
	sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);

	if (!st_ops->cfi_stubs) {
		pr_warn("struct_ops for %s has no cfi_stubs\n", st_ops->name);
		return -EINVAL;
	}

	type_id = btf_find_by_name_kind(btf, st_ops->name,
					BTF_KIND_STRUCT);
	if (type_id < 0) {
		pr_warn("Cannot find struct %s in %s\n",
			st_ops->name, btf_get_name(btf));
		return -EINVAL;
	}
	t = btf_type_by_id(btf, type_id);
	if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
		pr_warn("Cannot support #%u members in struct %s\n",
			btf_type_vlen(t), st_ops->name);
		return -EINVAL;
	}

	value_id = btf_find_by_name_kind(btf, value_name,
					 BTF_KIND_STRUCT);
	if (value_id < 0) {
		pr_warn("Cannot find struct %s in %s\n",
			value_name, btf_get_name(btf));
		return -EINVAL;
	}
	if (!is_valid_value_type(btf, value_id, t, value_name))
		return -EINVAL;

	arg_info = kcalloc(btf_type_vlen(t), sizeof(*arg_info),
			   GFP_KERNEL);
	if (!arg_info)
		return -ENOMEM;

	st_ops_desc->arg_info = arg_info;
	st_ops_desc->type = t;
	st_ops_desc->type_id = type_id;
	st_ops_desc->value_id = value_id;
	st_ops_desc->value_type = btf_type_by_id(btf, value_id);

	for_each_member(i, t, member) {
		const struct btf_type *func_proto;
		void **stub_func_addr;
		u32 moff;

		moff = __btf_member_bit_offset(t, member) / 8;
		mname = btf_name_by_offset(btf, member->name_off);
		if (!*mname) {
			pr_warn("anon member in struct %s is not supported\n",
				st_ops->name);
			err = -EOPNOTSUPP;
			goto errout;
		}

		if (__btf_member_bitfield_size(t, member)) {
			pr_warn("bit field member %s in struct %s is not supported\n",
				mname, st_ops->name);
			err = -EOPNOTSUPP;
			goto errout;
		}

		if (!st_ops_ids[IDX_MODULE_ID] && is_module_member(btf, member->type)) {
			pr_warn("'struct module' btf id not found. Is CONFIG_MODULES enabled? bpf_struct_ops '%s' needs module support.\n",
				st_ops->name);
			err = -EOPNOTSUPP;
			goto errout;
		}

		func_proto = btf_type_resolve_func_ptr(btf,
						       member->type,
						       NULL);

		/* The member is not a function pointer or
		 * the function pointer is not supported.
		 */
		if (!func_proto || bpf_struct_ops_supported(st_ops, moff))
			continue;

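		/* Build the calling-convention model for this member; it is
		 * used later when a trampoline is generated for the program
		 * attached to it.
		 */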
		if (btf_distill_func_proto(log, btf,
					   func_proto, mname,
					   &st_ops->func_models[i])) {
			pr_warn("Error in parsing func ptr %s in struct %s\n",
				mname, st_ops->name);
			err = -EINVAL;
			goto errout;
		}

		stub_func_addr = *(void **)(st_ops->cfi_stubs + moff);
		err = prepare_arg_info(btf, st_ops->name, mname,
				       func_proto, stub_func_addr,
				       arg_info + i);
		if (err)
			goto errout;
	}

	if (st_ops->init(btf)) {
		pr_warn("Error in init bpf_struct_ops %s\n",
			st_ops->name);
		err = -EINVAL;
		goto errout;
	}

	return 0;

errout:
	bpf_struct_ops_desc_release(st_ops_desc);

	return err;
}

static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
					   void *next_key)
{
	if (key && *(u32 *)key == 0)
		return -ENOENT;

	*(u32 *)next_key = 0;
	return 0;
}

int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	enum bpf_struct_ops_state state;
	s64 refcnt;

	if (unlikely(*(u32 *)key != 0))
		return -ENOENT;

	kvalue = &st_map->kvalue;
	/* Pair with smp_store_release() during map_update */
	state = smp_load_acquire(&kvalue->common.state);
	if (state == BPF_STRUCT_OPS_STATE_INIT) {
		memset(value, 0, map->value_size);
		return 0;
	}

	/* No lock is needed.  state and refcnt do not need
	 * to be updated together under atomic context.
	 */
	uvalue = value;
	memcpy(uvalue, st_map->uvalue, map->value_size);
	uvalue->common.state = state;

	/* This value offers the user space a general estimate of how
	 * many sockets are still utilizing this struct_ops for TCP
	 * congestion control. The number might not be exact, but it
	 * should sufficiently meet our present goals.
	 */
	refcnt = atomic64_read(&map->refcnt) - atomic64_read(&map->usercnt);
	refcount_set(&uvalue->common.refcnt, max_t(s64, refcnt, 0));

	return 0;
}

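/* Lookups from BPF programs are not supported; the only way to read a
 * struct_ops map is the BPF_MAP_LOOKUP_ELEM syscall path served by
 * bpf_struct_ops_map_sys_lookup_elem() above.
 */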
static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EINVAL);
}

static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->funcs_cnt; i++) {
		if (!st_map->links[i])
			break;
		bpf_link_put(st_map->links[i]);
		st_map->links[i] = NULL;
	}
}

static void bpf_struct_ops_map_free_image(struct bpf_struct_ops_map *st_map)
{
	int i;

	for (i = 0; i < st_map->image_pages_cnt; i++)
		bpf_struct_ops_image_free(st_map->image_pages[i]);
	st_map->image_pages_cnt = 0;
}

static int check_zero_holes(const struct btf *btf, const struct btf_type *t, void *data)
{
	const struct btf_member *member;
	u32 i, moff, msize, prev_mend = 0;
	const struct btf_type *mtype;

	for_each_member(i, t, member) {
		moff = __btf_member_bit_offset(t, member) / 8;
		if (moff > prev_mend &&
		    memchr_inv(data + prev_mend, 0, moff - prev_mend))
			return -EINVAL;

		mtype = btf_type_by_id(btf, member->type);
		mtype = btf_resolve_size(btf, mtype, &msize);
		if (IS_ERR(mtype))
			return PTR_ERR(mtype);
		prev_mend = moff + msize;
	}

	if (t->size > prev_mend &&
	    memchr_inv(data + prev_mend, 0, t->size - prev_mend))
		return -EINVAL;

	return 0;
}

static void bpf_struct_ops_link_release(struct bpf_link *link)
{
}

static void bpf_struct_ops_link_dealloc(struct bpf_link *link)
{
	struct bpf_tramp_link *tlink = container_of(link, struct bpf_tramp_link, link);

	kfree(tlink);
}

const struct bpf_link_ops bpf_struct_ops_link_lops = {
	.release = bpf_struct_ops_link_release,
	.dealloc = bpf_struct_ops_link_dealloc,
};

int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
				      struct bpf_tramp_link *link,
				      const struct btf_func_model *model,
				      void *stub_func,
				      void **_image, u32 *_image_off,
				      bool allow_alloc)
{
	u32 image_off = *_image_off, flags = BPF_TRAMP_F_INDIRECT;
	void *image = *_image;
	int size;

	tlinks[BPF_TRAMP_FENTRY].links[0] = link;
	tlinks[BPF_TRAMP_FENTRY].nr_links = 1;

	if (model->ret_size > 0)
		flags |= BPF_TRAMP_F_RET_FENTRY_RET;

	size = arch_bpf_trampoline_size(model, flags, tlinks, NULL);
	if (size <= 0)
		return size ? : -EFAULT;

	/* Allocate image buffer if necessary */
	if (!image || size > PAGE_SIZE - image_off) {
		if (!allow_alloc)
			return -E2BIG;

		image = bpf_struct_ops_image_alloc();
		if (IS_ERR(image))
			return PTR_ERR(image);
		image_off = 0;
	}

	size = arch_prepare_bpf_trampoline(NULL, image + image_off,
					   image + image_off + size,
					   model, flags, tlinks, stub_func);
	if (size <= 0) {
		if (image != *_image)
			bpf_struct_ops_image_free(image);
		return size ? : -EFAULT;
	}

	*_image = image;
	*_image_off = image_off + size;
	return 0;
}

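/* Each trampoline gets a kallsyms entry named "bpf__<st_ops name>_<member
 * name>", e.g. "bpf__tcp_congestion_ops_ssthresh" (illustrative), so that
 * stack traces through struct_ops trampolines can be attributed to the
 * member they implement.
 */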
static void bpf_struct_ops_ksym_init(const char *tname, const char *mname,
				     void *image, unsigned int size,
				     struct bpf_ksym *ksym)
{
	snprintf(ksym->name, KSYM_NAME_LEN, "bpf__%s_%s", tname, mname);
	INIT_LIST_HEAD_RCU(&ksym->lnode);
	bpf_image_ksym_init(image, size, ksym);
}

static void bpf_struct_ops_map_add_ksyms(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->funcs_cnt; i++) {
		if (!st_map->ksyms[i])
			break;
		bpf_image_ksym_add(st_map->ksyms[i]);
	}
}

static void bpf_struct_ops_map_del_ksyms(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->funcs_cnt; i++) {
		if (!st_map->ksyms[i])
			break;
		bpf_image_ksym_del(st_map->ksyms[i]);
	}
}

static void bpf_struct_ops_map_free_ksyms(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->funcs_cnt; i++) {
		if (!st_map->ksyms[i])
			break;
		kfree(st_map->ksyms[i]);
		st_map->ksyms[i] = NULL;
	}
}

static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 flags)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
	const struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	const struct btf_type *module_type;
	const struct btf_member *member;
	const struct btf_type *t = st_ops_desc->type;
	struct bpf_tramp_links *tlinks;
	void *udata, *kdata;
	int prog_fd, err;
	u32 i, trampoline_start, image_off = 0;
	void *cur_image = NULL, *image = NULL;
	struct bpf_link **plink;
	struct bpf_ksym **pksym;
	const char *tname, *mname;

	if (flags)
		return -EINVAL;

	if (*(u32 *)key != 0)
		return -E2BIG;

	err = check_zero_holes(st_map->btf, st_ops_desc->value_type, value);
	if (err)
		return err;

	uvalue = value;
	err = check_zero_holes(st_map->btf, t, uvalue->data);
	if (err)
		return err;

	if (uvalue->common.state || refcount_read(&uvalue->common.refcnt))
		return -EINVAL;

	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
	if (!tlinks)
		return -ENOMEM;

	uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
	kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;

	mutex_lock(&st_map->lock);

	if (kvalue->common.state != BPF_STRUCT_OPS_STATE_INIT) {
		err = -EBUSY;
		goto unlock;
	}

	memcpy(uvalue, value, map->value_size);

	udata = &uvalue->data;
	kdata = &kvalue->data;

	plink = st_map->links;
	pksym = st_map->ksyms;
	tname = btf_name_by_offset(st_map->btf, t->name_off);
	module_type = btf_type_by_id(btf_vmlinux, st_ops_ids[IDX_MODULE_ID]);
	for_each_member(i, t, member) {
		const struct btf_type *mtype, *ptype;
		struct bpf_prog *prog;
		struct bpf_tramp_link *link;
		struct bpf_ksym *ksym;
		u32 moff;

		moff = __btf_member_bit_offset(t, member) / 8;
		mname = btf_name_by_offset(st_map->btf, member->name_off);
		ptype = btf_type_resolve_ptr(st_map->btf, member->type, NULL);
		if (ptype == module_type) {
			if (*(void **)(udata + moff))
				goto reset_unlock;
			*(void **)(kdata + moff) = BPF_MODULE_OWNER;
			continue;
		}

		err = st_ops->init_member(t, member, kdata, udata);
		if (err < 0)
			goto reset_unlock;

		/* The ->init_member() has handled this member */
		if (err > 0)
			continue;

		/* If st_ops->init_member does not handle it,
		 * we will only handle func ptrs and zero-ed members
		 * here. Reject everything else.
		 */

		/* All non func ptr member must be 0 */
		if (!ptype || !btf_type_is_func_proto(ptype)) {
			u32 msize;

			mtype = btf_type_by_id(st_map->btf, member->type);
			mtype = btf_resolve_size(st_map->btf, mtype, &msize);
			if (IS_ERR(mtype)) {
				err = PTR_ERR(mtype);
				goto reset_unlock;
			}

			if (memchr_inv(udata + moff, 0, msize)) {
				err = -EINVAL;
				goto reset_unlock;
			}

			continue;
		}

		prog_fd = (int)(*(unsigned long *)(udata + moff));
		/* Similar check as the attr->attach_prog_fd */
		if (!prog_fd)
			continue;

		prog = bpf_prog_get(prog_fd);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto reset_unlock;
		}

		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
		    prog->aux->attach_btf_id != st_ops_desc->type_id ||
		    prog->expected_attach_type != i) {
			bpf_prog_put(prog);
			err = -EINVAL;
			goto reset_unlock;
		}

		link = kzalloc(sizeof(*link), GFP_USER);
		if (!link) {
			bpf_prog_put(prog);
			err = -ENOMEM;
			goto reset_unlock;
		}
		bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS,
			      &bpf_struct_ops_link_lops, prog);
		*plink++ = &link->link;

		ksym = kzalloc(sizeof(*ksym), GFP_USER);
		if (!ksym) {
			err = -ENOMEM;
			goto reset_unlock;
		}
		*pksym++ = ksym;

		trampoline_start = image_off;
		err = bpf_struct_ops_prepare_trampoline(tlinks, link,
							&st_ops->func_models[i],
							*(void **)(st_ops->cfi_stubs + moff),
							&image, &image_off,
							st_map->image_pages_cnt < MAX_TRAMP_IMAGE_PAGES);
		if (err)
			goto reset_unlock;

		if (cur_image != image) {
			st_map->image_pages[st_map->image_pages_cnt++] = image;
			cur_image = image;
			trampoline_start = 0;
		}

		*(void **)(kdata + moff) = image + trampoline_start + cfi_get_offset();

		/* put prog_id to udata */
		*(unsigned long *)(udata + moff) = prog->aux->id;

		/* init ksym for this trampoline */
		bpf_struct_ops_ksym_init(tname, mname,
					 image + trampoline_start,
					 image_off - trampoline_start,
					 ksym);
	}

	if (st_ops->validate) {
		err = st_ops->validate(kdata);
		if (err)
			goto reset_unlock;
	}
	for (i = 0; i < st_map->image_pages_cnt; i++) {
		err = arch_protect_bpf_trampoline(st_map->image_pages[i],
						  PAGE_SIZE);
		if (err)
			goto reset_unlock;
	}

	if (st_map->map.map_flags & BPF_F_LINK) {
		err = 0;
		/* Let bpf_link handle registration & unregistration.
		 *
		 * Pair with smp_load_acquire() during lookup_elem().
		 */
		smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_READY);
		goto unlock;
	}

	err = st_ops->reg(kdata, NULL);
	if (likely(!err)) {
		/* This refcnt increment on the map here after
		 * 'st_ops->reg()' is secure since the state of the
		 * map must be set to INIT at this moment, and thus
		 * bpf_struct_ops_map_delete_elem() can't unregister
		 * or transition it to TOBEFREE concurrently.
		 */
		bpf_map_inc(map);
		/* Pair with smp_load_acquire() during lookup_elem().
		 * It ensures the above udata updates (e.g. prog->aux->id)
		 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
		 */
		smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_INUSE);
		goto unlock;
	}

	/* Error during st_ops->reg().  Can happen if this struct_ops needs to be
	 * verified as a whole, after all init_member() calls.  Can also happen if
	 * there was a race in registering the struct_ops (under the same name) to
	 * a sub-system through different struct_ops's maps.
	 */

reset_unlock:
	bpf_struct_ops_map_free_ksyms(st_map);
	bpf_struct_ops_map_free_image(st_map);
	bpf_struct_ops_map_put_progs(st_map);
	memset(uvalue, 0, map->value_size);
	memset(kvalue, 0, map->value_size);
unlock:
	kfree(tlinks);
	mutex_unlock(&st_map->lock);
	if (!err)
		bpf_struct_ops_map_add_ksyms(st_map);
	return err;
}

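/* A non-BPF_F_LINK map transitions INIT -> INUSE in update_elem() above
 * (after a successful st_ops->reg()), and INUSE -> TOBEFREE here, which
 * unregisters the kdata and drops the map reference taken at registration.
 */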
static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
{
	enum bpf_struct_ops_state prev_state;
	struct bpf_struct_ops_map *st_map;

	st_map = (struct bpf_struct_ops_map *)map;
	if (st_map->map.map_flags & BPF_F_LINK)
		return -EOPNOTSUPP;

	prev_state = cmpxchg(&st_map->kvalue.common.state,
			     BPF_STRUCT_OPS_STATE_INUSE,
			     BPF_STRUCT_OPS_STATE_TOBEFREE);
	switch (prev_state) {
	case BPF_STRUCT_OPS_STATE_INUSE:
		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, NULL);
		bpf_map_put(map);
		return 0;
	case BPF_STRUCT_OPS_STATE_TOBEFREE:
		return -EINPROGRESS;
	case BPF_STRUCT_OPS_STATE_INIT:
		return -ENOENT;
	default:
		WARN_ON_ONCE(1);
		/* Should never happen.  Treat it as not found. */
		return -ENOENT;
	}
}

static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
					     struct seq_file *m)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	void *value;
	int err;

	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		return;

	err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	if (!err) {
		btf_type_seq_show(st_map->btf,
				  map->btf_vmlinux_value_type_id,
				  value, m);
		seq_putc(m, '\n');
	}

	kfree(value);
}

static void __bpf_struct_ops_map_free(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	if (st_map->links)
		bpf_struct_ops_map_put_progs(st_map);
	if (st_map->ksyms)
		bpf_struct_ops_map_free_ksyms(st_map);
	bpf_map_area_free(st_map->links);
	bpf_map_area_free(st_map->ksyms);
	bpf_struct_ops_map_free_image(st_map);
	bpf_map_area_free(st_map->uvalue);
	bpf_map_area_free(st_map);
}

static void bpf_struct_ops_map_free(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	/* st_ops->owner was acquired during map_alloc to implicitly hold
	 * the btf's refcnt. The module refcnt was only taken when
	 * btf_is_module() was true, so it is only put in that case.
	 * st_map->btf cannot be NULL here.
	 */
	if (btf_is_module(st_map->btf))
		module_put(st_map->st_ops_desc->st_ops->owner);

	bpf_struct_ops_map_del_ksyms(st_map);

	/* The struct_ops's function may switch to another struct_ops.
	 *
	 * For example, bpf_tcp_cc_x->init() may switch to
	 * another tcp_cc_y by calling
	 * setsockopt(TCP_CONGESTION, "tcp_cc_y").
	 * During the switch, bpf_struct_ops_put(tcp_cc_x) is called
	 * and its refcount may reach 0, which would then free its
	 * trampoline image while tcp_cc_x is still running.
	 *
	 * A vanilla rcu gp is to wait for all bpf-tcp-cc progs to
	 * finish; a bpf-tcp-cc prog is non-sleepable.
	 * A rcu_tasks gp is to wait for the last few insns
	 * in the trampoline image to finish before releasing
	 * the trampoline image.
	 */
	synchronize_rcu_mult(call_rcu, call_rcu_tasks);

	__bpf_struct_ops_map_free(map);
}

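/* A struct_ops map always holds exactly one element, keyed by a u32 of 0;
 * key_size and max_entries are enforced here, and value_size is checked
 * against the struct_ops value type in map_alloc().
 */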
static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
{
	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
	    (attr->map_flags & ~(BPF_F_LINK | BPF_F_VTYPE_BTF_OBJ_FD)) ||
	    !attr->btf_vmlinux_value_type_id)
		return -EINVAL;
	return 0;
}

static u32 count_func_ptrs(const struct btf *btf, const struct btf_type *t)
{
	int i;
	u32 count;
	const struct btf_member *member;

	count = 0;
	for_each_member(i, t, member)
		if (btf_type_resolve_func_ptr(btf, member->type, NULL))
			count++;
	return count;
}

static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
{
	const struct bpf_struct_ops_desc *st_ops_desc;
	size_t st_map_size;
	struct bpf_struct_ops_map *st_map;
	const struct btf_type *t, *vt;
	struct module *mod = NULL;
	struct bpf_map *map;
	struct btf *btf;
	int ret;

	if (attr->map_flags & BPF_F_VTYPE_BTF_OBJ_FD) {
		/* The map holds btf for its whole life time. */
		btf = btf_get_by_fd(attr->value_type_btf_obj_fd);
		if (IS_ERR(btf))
			return ERR_CAST(btf);
		if (!btf_is_module(btf)) {
			btf_put(btf);
			return ERR_PTR(-EINVAL);
		}

		mod = btf_try_get_module(btf);
		/* mod holds a refcnt to btf. We don't need an extra refcnt
		 * here.
		 */
		btf_put(btf);
		if (!mod)
			return ERR_PTR(-EINVAL);
	} else {
		btf = bpf_get_btf_vmlinux();
		if (IS_ERR(btf))
			return ERR_CAST(btf);
		if (!btf)
			return ERR_PTR(-ENOTSUPP);
	}

	st_ops_desc = bpf_struct_ops_find_value(btf, attr->btf_vmlinux_value_type_id);
	if (!st_ops_desc) {
		ret = -ENOTSUPP;
		goto errout;
	}

	vt = st_ops_desc->value_type;
	if (attr->value_size != vt->size) {
		ret = -EINVAL;
		goto errout;
	}

	t = st_ops_desc->type;

	st_map_size = sizeof(*st_map) +
		      /* kvalue stores the wrapped struct
		       * (e.g. struct bpf_struct_ops_tcp_congestion_ops)
		       */
		      (vt->size - sizeof(struct bpf_struct_ops_value));

	st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
	if (!st_map) {
		ret = -ENOMEM;
		goto errout;
	}

	st_map->st_ops_desc = st_ops_desc;
	map = &st_map->map;

	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
	st_map->funcs_cnt = count_func_ptrs(btf, t);
	st_map->links =
		bpf_map_area_alloc(st_map->funcs_cnt * sizeof(struct bpf_link *),
				   NUMA_NO_NODE);

	st_map->ksyms =
		bpf_map_area_alloc(st_map->funcs_cnt * sizeof(struct bpf_ksym *),
				   NUMA_NO_NODE);
	if (!st_map->uvalue || !st_map->links || !st_map->ksyms) {
		ret = -ENOMEM;
		goto errout_free;
	}
	st_map->btf = btf;

	mutex_init(&st_map->lock);
	bpf_map_init_from_attr(map, attr);

	return map;

errout_free:
	__bpf_struct_ops_map_free(map);
errout:
	module_put(mod);

	return ERR_PTR(ret);
}

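/* Report an estimate of the memory pinned by this map: the kvalue embedded
 * in st_map, the separately allocated uvalue, the links/ksyms pointer
 * arrays, and (at least) one trampoline image page.
 */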
static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
	const struct btf_type *vt = st_ops_desc->value_type;
	u64 usage;

	usage = sizeof(*st_map) +
		vt->size - sizeof(struct bpf_struct_ops_value);
	usage += vt->size;
	usage += st_map->funcs_cnt * sizeof(struct bpf_link *);
	usage += st_map->funcs_cnt * sizeof(struct bpf_ksym *);
	usage += PAGE_SIZE;
	return usage;
}

BTF_ID_LIST_SINGLE(bpf_struct_ops_map_btf_ids, struct, bpf_struct_ops_map)
const struct bpf_map_ops bpf_struct_ops_map_ops = {
	.map_alloc_check = bpf_struct_ops_map_alloc_check,
	.map_alloc = bpf_struct_ops_map_alloc,
	.map_free = bpf_struct_ops_map_free,
	.map_get_next_key = bpf_struct_ops_map_get_next_key,
	.map_lookup_elem = bpf_struct_ops_map_lookup_elem,
	.map_delete_elem = bpf_struct_ops_map_delete_elem,
	.map_update_elem = bpf_struct_ops_map_update_elem,
	.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
	.map_mem_usage = bpf_struct_ops_map_mem_usage,
	.map_btf_id = &bpf_struct_ops_map_btf_ids[0],
};

/* "const void *" because some subsystem is
 * passing a const (e.g. const struct tcp_congestion_ops *)
 */
bool bpf_struct_ops_get(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;
	struct bpf_struct_ops_map *st_map;
	struct bpf_map *map;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);

	map = __bpf_map_inc_not_zero(&st_map->map, false);
	return !IS_ERR(map);
}

void bpf_struct_ops_put(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;
	struct bpf_struct_ops_map *st_map;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);

	bpf_map_put(&st_map->map);
}

static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	return map->map_type == BPF_MAP_TYPE_STRUCT_OPS &&
		map->map_flags & BPF_F_LINK &&
		/* Pair with smp_store_release() during map_update */
		smp_load_acquire(&st_map->kvalue.common.state) == BPF_STRUCT_OPS_STATE_READY;
}

static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_struct_ops_map *st_map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	st_map = (struct bpf_struct_ops_map *)
		rcu_dereference_protected(st_link->map, true);
	if (st_map) {
		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
		bpf_map_put(&st_map->map);
	}
	kfree(st_link);
}

static void bpf_struct_ops_map_link_show_fdinfo(const struct bpf_link *link,
						struct seq_file *seq)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_map *map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	rcu_read_lock();
	map = rcu_dereference(st_link->map);
	if (map)
		seq_printf(seq, "map_id:\t%d\n", map->id);
	rcu_read_unlock();
}

static int bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link,
						  struct bpf_link_info *info)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_map *map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	rcu_read_lock();
	map = rcu_dereference(st_link->map);
	if (map)
		info->struct_ops.map_id = map->id;
	rcu_read_unlock();
	return 0;
}

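/* Switch an attached BPF_F_LINK link from its current struct_ops map to
 * new_map of the same struct_ops type. The subsystem must implement
 * ->update() for this to be supported.
 */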
static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map *new_map,
					  struct bpf_map *expected_old_map)
{
	struct bpf_struct_ops_map *st_map, *old_st_map;
	struct bpf_map *old_map;
	struct bpf_struct_ops_link *st_link;
	int err;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	st_map = container_of(new_map, struct bpf_struct_ops_map, map);

	if (!bpf_struct_ops_valid_to_reg(new_map))
		return -EINVAL;

	if (!st_map->st_ops_desc->st_ops->update)
		return -EOPNOTSUPP;

	mutex_lock(&update_mutex);

	old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
	if (!old_map) {
		err = -ENOLINK;
		goto err_out;
	}
	if (expected_old_map && old_map != expected_old_map) {
		err = -EPERM;
		goto err_out;
	}

	old_st_map = container_of(old_map, struct bpf_struct_ops_map, map);
	/* The new and old struct_ops must be the same type. */
	if (st_map->st_ops_desc != old_st_map->st_ops_desc) {
		err = -EINVAL;
		goto err_out;
	}

	err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data, link);
	if (err)
		goto err_out;

	bpf_map_inc(new_map);
	rcu_assign_pointer(st_link->map, new_map);
	bpf_map_put(old_map);

err_out:
	mutex_unlock(&update_mutex);

	return err;
}

static int bpf_struct_ops_map_link_detach(struct bpf_link *link)
{
	struct bpf_struct_ops_link *st_link = container_of(link, struct bpf_struct_ops_link, link);
	struct bpf_struct_ops_map *st_map;
	struct bpf_map *map;

	mutex_lock(&update_mutex);

	map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
	if (!map) {
		mutex_unlock(&update_mutex);
		return 0;
	}
	st_map = container_of(map, struct bpf_struct_ops_map, map);

	st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);

	RCU_INIT_POINTER(st_link->map, NULL);
	/* Pair with bpf_map_get() in bpf_struct_ops_link_create() or
	 * bpf_map_inc() in bpf_struct_ops_map_link_update().
	 */
	bpf_map_put(&st_map->map);

	mutex_unlock(&update_mutex);

	wake_up_interruptible_poll(&st_link->wait_hup, EPOLLHUP);

	return 0;
}

static __poll_t bpf_struct_ops_map_link_poll(struct file *file,
					     struct poll_table_struct *pts)
{
	struct bpf_struct_ops_link *st_link = file->private_data;

	poll_wait(file, &st_link->wait_hup, pts);

	return rcu_access_pointer(st_link->map) ? 0 : EPOLLHUP;
}

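/* A struct_ops link FD supports poll(): it reports EPOLLHUP once the map
 * has been detached, either by the subsystem or via bpf_link_detach()
 * (see the wake_up_interruptible_poll() in bpf_struct_ops_map_link_detach()).
 */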
static const struct bpf_link_ops bpf_struct_ops_map_lops = {
	.dealloc = bpf_struct_ops_map_link_dealloc,
	.detach = bpf_struct_ops_map_link_detach,
	.show_fdinfo = bpf_struct_ops_map_link_show_fdinfo,
	.fill_link_info = bpf_struct_ops_map_link_fill_link_info,
	.update_map = bpf_struct_ops_map_link_update,
	.poll = bpf_struct_ops_map_link_poll,
};

int bpf_struct_ops_link_create(union bpf_attr *attr)
{
	struct bpf_struct_ops_link *link = NULL;
	struct bpf_link_primer link_primer;
	struct bpf_struct_ops_map *st_map;
	struct bpf_map *map;
	int err;

	map = bpf_map_get(attr->link_create.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	st_map = (struct bpf_struct_ops_map *)map;

	if (!bpf_struct_ops_valid_to_reg(map)) {
		err = -EINVAL;
		goto err_out;
	}

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto err_out;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_map_lops, NULL);

	err = bpf_link_prime(&link->link, &link_primer);
	if (err)
		goto err_out;

	init_waitqueue_head(&link->wait_hup);

	/* Hold the update_mutex such that the subsystem cannot
	 * do link->ops->detach() before the link is fully initialized.
	 */
	mutex_lock(&update_mutex);
	err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data, &link->link);
	if (err) {
		mutex_unlock(&update_mutex);
		bpf_link_cleanup(&link_primer);
		link = NULL;
		goto err_out;
	}
	RCU_INIT_POINTER(link->map, map);
	mutex_unlock(&update_mutex);

	return bpf_link_settle(&link_primer);

err_out:
	bpf_map_put(map);
	kfree(link);
	return err;
}

void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	info->btf_vmlinux_id = btf_obj_id(st_map->btf);
}