// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <linux/seq_file.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/btf_ids.h>
#include <linux/rcupdate_wait.h>

struct bpf_struct_ops_value {
	struct bpf_struct_ops_common_value common;
	char data[] ____cacheline_aligned_in_smp;
};

struct bpf_struct_ops_map {
	struct bpf_map map;
	struct rcu_head rcu;
	const struct bpf_struct_ops_desc *st_ops_desc;
	/* protect map_update */
	struct mutex lock;
	/* links has all the bpf_links that are populated
	 * to the func ptrs of the kernel's struct
	 * (in kvalue.data).
	 */
	struct bpf_link **links;
	u32 links_cnt;
	/* image is a page that has all the trampolines
	 * that store the func args before calling the bpf_prog.
	 * A PAGE_SIZE "image" is enough to store all trampolines for
	 * "links[]".
	 */
	void *image;
	/* The owner module's btf. */
	struct btf *btf;
	/* uvalue->data stores the kernel struct
	 * (e.g. tcp_congestion_ops) that is more useful
	 * to userspace than the kvalue. For example,
	 * the bpf_prog's id is stored instead of the kernel
	 * address of a func ptr.
	 */
	struct bpf_struct_ops_value *uvalue;
	/* kvalue.data stores the actual kernel's struct
	 * (e.g. tcp_congestion_ops) that will be
	 * registered to the kernel subsystem.
	 */
	struct bpf_struct_ops_value kvalue;
};

struct bpf_struct_ops_link {
	struct bpf_link link;
	struct bpf_map __rcu *map;
};

static DEFINE_MUTEX(update_mutex);

#define VALUE_PREFIX "bpf_struct_ops_"
#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)

const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
};

const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_struct_ops_test_run,
#endif
};

BTF_ID_LIST(st_ops_ids)
BTF_ID(struct, module)
BTF_ID(struct, bpf_struct_ops_common_value)

enum {
	IDX_MODULE_ID,
	IDX_ST_OPS_COMMON_VALUE_ID,
};

extern struct btf *btf_vmlinux;

static bool is_valid_value_type(struct btf *btf, s32 value_id,
				const struct btf_type *type,
				const char *value_name)
{
	const struct btf_type *common_value_type;
	const struct btf_member *member;
	const struct btf_type *vt, *mt;

	vt = btf_type_by_id(btf, value_id);
	if (btf_vlen(vt) != 2) {
		pr_warn("The number of %s's members should be 2, but we get %d\n",
			value_name, btf_vlen(vt));
		return false;
	}
	member = btf_type_member(vt);
	mt = btf_type_by_id(btf, member->type);
	common_value_type = btf_type_by_id(btf_vmlinux,
					   st_ops_ids[IDX_ST_OPS_COMMON_VALUE_ID]);
	if (mt != common_value_type) {
		pr_warn("The first member of %s should be bpf_struct_ops_common_value\n",
			value_name);
		return false;
	}
	member++;
	mt = btf_type_by_id(btf, member->type);
	if (mt != type) {
		pr_warn("The second member of %s should be %s\n",
			value_name, btf_name_by_offset(btf, type->name_off));
		return false;
	}

	return true;
}
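
/* For illustration only: for a subsystem struct like tcp_congestion_ops,
 * the value type validated above is expected to look like the sketch
 * below (name derived from VALUE_PREFIX; the exact definition lives
 * with the subsystem, not in this file):
 *
 *	struct bpf_struct_ops_tcp_congestion_ops {
 *		struct bpf_struct_ops_common_value common;
 *		struct tcp_congestion_ops data;
 *	};
 *
 * i.e. exactly two members: the common state/refcnt header first,
 * followed by the kernel struct itself.
 */
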
int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
			     struct btf *btf,
			     struct bpf_verifier_log *log)
{
	struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
	const struct btf_member *member;
	const struct btf_type *t;
	s32 type_id, value_id;
	char value_name[128];
	const char *mname;
	int i;

	if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
	    sizeof(value_name)) {
		pr_warn("struct_ops name %s is too long\n",
			st_ops->name);
		return -EINVAL;
	}
	sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);

	type_id = btf_find_by_name_kind(btf, st_ops->name,
					BTF_KIND_STRUCT);
	if (type_id < 0) {
		pr_warn("Cannot find struct %s in %s\n",
			st_ops->name, btf_get_name(btf));
		return -EINVAL;
	}
	t = btf_type_by_id(btf, type_id);
	if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
		pr_warn("Cannot support #%u members in struct %s\n",
			btf_type_vlen(t), st_ops->name);
		return -EINVAL;
	}

	value_id = btf_find_by_name_kind(btf, value_name,
					 BTF_KIND_STRUCT);
	if (value_id < 0) {
		pr_warn("Cannot find struct %s in %s\n",
			value_name, btf_get_name(btf));
		return -EINVAL;
	}
	if (!is_valid_value_type(btf, value_id, t, value_name))
		return -EINVAL;

	for_each_member(i, t, member) {
		const struct btf_type *func_proto;

		mname = btf_name_by_offset(btf, member->name_off);
		if (!*mname) {
			pr_warn("anon member in struct %s is not supported\n",
				st_ops->name);
			return -EOPNOTSUPP;
		}

		if (__btf_member_bitfield_size(t, member)) {
			pr_warn("bit field member %s in struct %s is not supported\n",
				mname, st_ops->name);
			return -EOPNOTSUPP;
		}

		func_proto = btf_type_resolve_func_ptr(btf,
						       member->type,
						       NULL);
		if (func_proto &&
		    btf_distill_func_proto(log, btf,
					   func_proto, mname,
					   &st_ops->func_models[i])) {
			pr_warn("Error in parsing func ptr %s in struct %s\n",
				mname, st_ops->name);
			return -EINVAL;
		}
	}

	if (st_ops->init(btf)) {
		pr_warn("Error in init bpf_struct_ops %s\n",
			st_ops->name);
		return -EINVAL;
	}

	st_ops_desc->type_id = type_id;
	st_ops_desc->type = t;
	st_ops_desc->value_id = value_id;
	st_ops_desc->value_type = btf_type_by_id(btf, value_id);

	return 0;
}

static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
					   void *next_key)
{
	if (key && *(u32 *)key == 0)
		return -ENOENT;

	*(u32 *)next_key = 0;
	return 0;
}

int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	enum bpf_struct_ops_state state;
	s64 refcnt;

	if (unlikely(*(u32 *)key != 0))
		return -ENOENT;

	kvalue = &st_map->kvalue;
	/* Pair with smp_store_release() during map_update */
	state = smp_load_acquire(&kvalue->common.state);
	if (state == BPF_STRUCT_OPS_STATE_INIT) {
		memset(value, 0, map->value_size);
		return 0;
	}

	/* No lock is needed. state and refcnt do not need
	 * to be updated together under atomic context.
	 */
	uvalue = value;
	memcpy(uvalue, st_map->uvalue, map->value_size);
	uvalue->common.state = state;

	/* This value offers the user space a general estimate of how
	 * many sockets are still utilizing this struct_ops for TCP
	 * congestion control. The number might not be exact, but it
	 * should sufficiently meet our present goals.
	 */
	refcnt = atomic64_read(&map->refcnt) - atomic64_read(&map->usercnt);
	refcount_set(&uvalue->common.refcnt, max_t(s64, refcnt, 0));

	return 0;
}
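
/* A minimal userspace sketch of the lookup path above, assuming libbpf
 * and an already-created struct_ops map fd (the value struct name is
 * illustrative):
 *
 *	__u32 key = 0;
 *	struct bpf_struct_ops_tcp_congestion_ops val;
 *
 *	if (!bpf_map_lookup_elem(map_fd, &key, &val))
 *		printf("state: %u\n", val.common.state);
 *
 * Func ptr members of val.data carry bpf_prog ids (not kernel
 * addresses), as described in the uvalue comment above.
 */
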
static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EINVAL);
}

static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->links_cnt; i++) {
		if (st_map->links[i]) {
			bpf_link_put(st_map->links[i]);
			st_map->links[i] = NULL;
		}
	}
}

static int check_zero_holes(const struct btf *btf, const struct btf_type *t, void *data)
{
	const struct btf_member *member;
	u32 i, moff, msize, prev_mend = 0;
	const struct btf_type *mtype;

	for_each_member(i, t, member) {
		moff = __btf_member_bit_offset(t, member) / 8;
		if (moff > prev_mend &&
		    memchr_inv(data + prev_mend, 0, moff - prev_mend))
			return -EINVAL;

		mtype = btf_type_by_id(btf, member->type);
		mtype = btf_resolve_size(btf, mtype, &msize);
		if (IS_ERR(mtype))
			return PTR_ERR(mtype);
		prev_mend = moff + msize;
	}

	if (t->size > prev_mend &&
	    memchr_inv(data + prev_mend, 0, t->size - prev_mend))
		return -EINVAL;

	return 0;
}

static void bpf_struct_ops_link_release(struct bpf_link *link)
{
}

static void bpf_struct_ops_link_dealloc(struct bpf_link *link)
{
	struct bpf_tramp_link *tlink = container_of(link, struct bpf_tramp_link, link);

	kfree(tlink);
}

const struct bpf_link_ops bpf_struct_ops_link_lops = {
	.release = bpf_struct_ops_link_release,
	.dealloc = bpf_struct_ops_link_dealloc,
};

int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
				      struct bpf_tramp_link *link,
				      const struct btf_func_model *model,
				      void *stub_func, void *image, void *image_end)
{
	u32 flags = BPF_TRAMP_F_INDIRECT;
	int size;

	tlinks[BPF_TRAMP_FENTRY].links[0] = link;
	tlinks[BPF_TRAMP_FENTRY].nr_links = 1;

	if (model->ret_size > 0)
		flags |= BPF_TRAMP_F_RET_FENTRY_RET;

	size = arch_bpf_trampoline_size(model, flags, tlinks, NULL);
	if (size < 0)
		return size;
	if (size > (unsigned long)image_end - (unsigned long)image)
		return -E2BIG;
	return arch_prepare_bpf_trampoline(NULL, image, image_end,
					   model, flags, tlinks, stub_func);
}
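
/* Illustrative layout only: map_update_elem() below calls
 * bpf_struct_ops_prepare_trampoline() once per func ptr member, packing
 * the per-member trampolines back to back into the single PAGE_SIZE
 * image:
 *
 *	image                                       image + PAGE_SIZE
 *	| tramp for ops->f1 | tramp for ops->f2 | ... |     free     |
 *
 * and each kdata func ptr is set to its trampoline (plus the CFI
 * offset), so the subsystem calls the bpf_prog through it.
 */
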
static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 flags)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
	const struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	const struct btf_type *module_type;
	const struct btf_member *member;
	const struct btf_type *t = st_ops_desc->type;
	struct bpf_tramp_links *tlinks;
	void *udata, *kdata;
	int prog_fd, err;
	void *image, *image_end;
	u32 i;

	if (flags)
		return -EINVAL;

	if (*(u32 *)key != 0)
		return -E2BIG;

	err = check_zero_holes(st_map->btf, st_ops_desc->value_type, value);
	if (err)
		return err;

	uvalue = value;
	err = check_zero_holes(st_map->btf, t, uvalue->data);
	if (err)
		return err;

	if (uvalue->common.state || refcount_read(&uvalue->common.refcnt))
		return -EINVAL;

	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
	if (!tlinks)
		return -ENOMEM;

	uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
	kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;

	mutex_lock(&st_map->lock);

	if (kvalue->common.state != BPF_STRUCT_OPS_STATE_INIT) {
		err = -EBUSY;
		goto unlock;
	}

	memcpy(uvalue, value, map->value_size);

	udata = &uvalue->data;
	kdata = &kvalue->data;
	image = st_map->image;
	image_end = st_map->image + PAGE_SIZE;

	module_type = btf_type_by_id(btf_vmlinux, st_ops_ids[IDX_MODULE_ID]);
	for_each_member(i, t, member) {
		const struct btf_type *mtype, *ptype;
		struct bpf_prog *prog;
		struct bpf_tramp_link *link;
		u32 moff;

		moff = __btf_member_bit_offset(t, member) / 8;
		ptype = btf_type_resolve_ptr(st_map->btf, member->type, NULL);
		if (ptype == module_type) {
			if (*(void **)(udata + moff)) {
				err = -EINVAL;
				goto reset_unlock;
			}
			*(void **)(kdata + moff) = BPF_MODULE_OWNER;
			continue;
		}

		err = st_ops->init_member(t, member, kdata, udata);
		if (err < 0)
			goto reset_unlock;

		/* The ->init_member() has handled this member */
		if (err > 0)
			continue;

		/* If st_ops->init_member does not handle it,
		 * we will only handle func ptrs and zero-ed members
		 * here. Reject everything else.
		 */

		/* All non func ptr members must be 0 */
		if (!ptype || !btf_type_is_func_proto(ptype)) {
			u32 msize;

			mtype = btf_type_by_id(st_map->btf, member->type);
			mtype = btf_resolve_size(st_map->btf, mtype, &msize);
			if (IS_ERR(mtype)) {
				err = PTR_ERR(mtype);
				goto reset_unlock;
			}

			if (memchr_inv(udata + moff, 0, msize)) {
				err = -EINVAL;
				goto reset_unlock;
			}

			continue;
		}

		prog_fd = (int)(*(unsigned long *)(udata + moff));
		/* Similar check as the attr->attach_prog_fd */
		if (!prog_fd)
			continue;

		prog = bpf_prog_get(prog_fd);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto reset_unlock;
		}

		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
		    prog->aux->attach_btf_id != st_ops_desc->type_id ||
		    prog->expected_attach_type != i) {
			bpf_prog_put(prog);
			err = -EINVAL;
			goto reset_unlock;
		}

		link = kzalloc(sizeof(*link), GFP_USER);
		if (!link) {
			bpf_prog_put(prog);
			err = -ENOMEM;
			goto reset_unlock;
		}
		bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS,
			      &bpf_struct_ops_link_lops, prog);
		st_map->links[i] = &link->link;

		err = bpf_struct_ops_prepare_trampoline(tlinks, link,
							&st_ops->func_models[i],
							*(void **)(st_ops->cfi_stubs + moff),
							image, image_end);
		if (err < 0)
			goto reset_unlock;

		*(void **)(kdata + moff) = image + cfi_get_offset();
		image += err;

		/* put prog_id to udata */
		*(unsigned long *)(udata + moff) = prog->aux->id;
	}
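
	/* All func ptrs are now resolved into trampolines in kdata.
	 * Two registration models follow: with BPF_F_LINK the map only
	 * becomes READY here and a bpf_link does the actual ->reg()
	 * later; without it, ->reg() is called right away and the map
	 * goes to INUSE.
	 */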
	if (st_map->map.map_flags & BPF_F_LINK) {
		err = 0;
		if (st_ops->validate) {
			err = st_ops->validate(kdata);
			if (err)
				goto reset_unlock;
		}
		arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
		/* Let bpf_link handle registration & unregistration.
		 *
		 * Pair with smp_load_acquire() during lookup_elem().
		 */
		smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_READY);
		goto unlock;
	}

	arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
	err = st_ops->reg(kdata);
	if (likely(!err)) {
		/* This refcnt increment on the map here after
		 * 'st_ops->reg()' is secure since the state of the
		 * map must be set to INIT at this moment, and thus
		 * bpf_struct_ops_map_delete_elem() can't unregister
		 * or transition it to TOBEFREE concurrently.
		 */
		bpf_map_inc(map);
		/* Pair with smp_load_acquire() during lookup_elem().
		 * It ensures the above udata updates (e.g. prog->aux->id)
		 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
		 */
		smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_INUSE);
		goto unlock;
	}

	/* Error during st_ops->reg(). Can happen if this struct_ops needs to be
	 * verified as a whole, after all init_member() calls. Can also happen if
	 * there was a race in registering the struct_ops (under the same name) to
	 * a sub-system through different struct_ops's maps.
	 */
	arch_unprotect_bpf_trampoline(st_map->image, PAGE_SIZE);

reset_unlock:
	bpf_struct_ops_map_put_progs(st_map);
	memset(uvalue, 0, map->value_size);
	memset(kvalue, 0, map->value_size);
unlock:
	kfree(tlinks);
	mutex_unlock(&st_map->lock);
	return err;
}

static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
{
	enum bpf_struct_ops_state prev_state;
	struct bpf_struct_ops_map *st_map;

	st_map = (struct bpf_struct_ops_map *)map;
	if (st_map->map.map_flags & BPF_F_LINK)
		return -EOPNOTSUPP;

	prev_state = cmpxchg(&st_map->kvalue.common.state,
			     BPF_STRUCT_OPS_STATE_INUSE,
			     BPF_STRUCT_OPS_STATE_TOBEFREE);
	switch (prev_state) {
	case BPF_STRUCT_OPS_STATE_INUSE:
		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data);
		bpf_map_put(map);
		return 0;
	case BPF_STRUCT_OPS_STATE_TOBEFREE:
		return -EINPROGRESS;
	case BPF_STRUCT_OPS_STATE_INIT:
		return -ENOENT;
	default:
		WARN_ON_ONCE(1);
		/* Should never happen. Treat it as not found. */
		return -ENOENT;
	}
}
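
/* A sketch of the state machine implied by update/delete above (the
 * states come from enum bpf_struct_ops_state, defined elsewhere):
 *
 *	INIT --update(BPF_F_LINK map)----> READY   (bpf_link does reg/unreg)
 *	INIT --update, ->reg() succeeds--> INUSE
 *	INUSE --delete, ->unreg()--------> TOBEFREE
 */
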
static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
					     struct seq_file *m)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	void *value;
	int err;

	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		return;

	err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	if (!err) {
		btf_type_seq_show(st_map->btf,
				  map->btf_vmlinux_value_type_id,
				  value, m);
		seq_puts(m, "\n");
	}

	kfree(value);
}

static void __bpf_struct_ops_map_free(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	if (st_map->links)
		bpf_struct_ops_map_put_progs(st_map);
	bpf_map_area_free(st_map->links);
	if (st_map->image) {
		arch_free_bpf_trampoline(st_map->image, PAGE_SIZE);
		bpf_jit_uncharge_modmem(PAGE_SIZE);
	}
	bpf_map_area_free(st_map->uvalue);
	bpf_map_area_free(st_map);
}

static void bpf_struct_ops_map_free(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	/* st_ops->owner was acquired during map_alloc to implicitly hold
	 * the btf's refcnt. The module was only acquired when
	 * btf_is_module() was true, and st_map->btf cannot be NULL here.
	 */
	if (btf_is_module(st_map->btf))
		module_put(st_map->st_ops_desc->st_ops->owner);

	/* The struct_ops's function may switch to another struct_ops.
	 *
	 * For example, bpf_tcp_cc_x->init() may switch to
	 * another tcp_cc_y by calling
	 * setsockopt(TCP_CONGESTION, "tcp_cc_y").
	 * During the switch, bpf_struct_ops_put(tcp_cc_x) is called
	 * and its refcount may reach 0 which then frees its
	 * trampoline image while tcp_cc_x is still running.
	 *
	 * A vanilla rcu gp is to wait for all bpf-tcp-cc progs
	 * to finish. bpf-tcp-cc prog is non sleepable.
	 * A rcu_tasks gp is to wait for the last few insns
	 * in the trampoline image to finish before releasing
	 * the trampoline image.
	 */
	synchronize_rcu_mult(call_rcu, call_rcu_tasks);

	__bpf_struct_ops_map_free(map);
}

static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
{
	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
	    (attr->map_flags & ~(BPF_F_LINK | BPF_F_VTYPE_BTF_OBJ_FD)) ||
	    !attr->btf_vmlinux_value_type_id)
		return -EINVAL;
	return 0;
}
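
/* For illustration, the union bpf_attr fields a loader is expected to
 * pass for BPF_MAP_CREATE of this map type (a sketch; the constraints
 * follow from the check above and from map_alloc below):
 *
 *	attr.map_type                  = BPF_MAP_TYPE_STRUCT_OPS;
 *	attr.key_size                  = 4;
 *	attr.value_size                = <size of the value struct>;
 *	attr.max_entries               = 1;
 *	attr.btf_vmlinux_value_type_id = <btf id of bpf_struct_ops_xxx>;
 *	attr.map_flags                 = 0, BPF_F_LINK and/or
 *					 BPF_F_VTYPE_BTF_OBJ_FD;
 *
 * With BPF_F_VTYPE_BTF_OBJ_FD, attr.value_type_btf_obj_fd must also
 * carry the fd of the owner module's btf.
 */
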
static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
{
	const struct bpf_struct_ops_desc *st_ops_desc;
	size_t st_map_size;
	struct bpf_struct_ops_map *st_map;
	const struct btf_type *t, *vt;
	struct module *mod = NULL;
	struct bpf_map *map;
	struct btf *btf;
	int ret;

	if (attr->map_flags & BPF_F_VTYPE_BTF_OBJ_FD) {
		/* The map holds btf for its whole life time. */
		btf = btf_get_by_fd(attr->value_type_btf_obj_fd);
		if (IS_ERR(btf))
			return ERR_CAST(btf);
		if (!btf_is_module(btf)) {
			btf_put(btf);
			return ERR_PTR(-EINVAL);
		}

		mod = btf_try_get_module(btf);
		/* mod holds a refcnt to btf. We don't need an extra refcnt
		 * here.
		 */
		btf_put(btf);
		if (!mod)
			return ERR_PTR(-EINVAL);
	} else {
		btf = bpf_get_btf_vmlinux();
		if (IS_ERR(btf))
			return ERR_CAST(btf);
		if (!btf)
			return ERR_PTR(-ENOTSUPP);
	}

	st_ops_desc = bpf_struct_ops_find_value(btf, attr->btf_vmlinux_value_type_id);
	if (!st_ops_desc) {
		ret = -ENOTSUPP;
		goto errout;
	}

	vt = st_ops_desc->value_type;
	if (attr->value_size != vt->size) {
		ret = -EINVAL;
		goto errout;
	}

	t = st_ops_desc->type;

	st_map_size = sizeof(*st_map) +
		/* kvalue stores the
		 * struct bpf_struct_ops_tcp_congestion_ops
		 */
		(vt->size - sizeof(struct bpf_struct_ops_value));

	st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
	if (!st_map) {
		ret = -ENOMEM;
		goto errout;
	}

	st_map->st_ops_desc = st_ops_desc;
	map = &st_map->map;

	ret = bpf_jit_charge_modmem(PAGE_SIZE);
	if (ret)
		goto errout_free;

	st_map->image = arch_alloc_bpf_trampoline(PAGE_SIZE);
	if (!st_map->image) {
		/* __bpf_struct_ops_map_free() uses st_map->image as a flag
		 * for "charged or not". In this case, we need to uncharge
		 * here.
		 */
		bpf_jit_uncharge_modmem(PAGE_SIZE);
		ret = -ENOMEM;
		goto errout_free;
	}
	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
	st_map->links_cnt = btf_type_vlen(t);
	st_map->links =
		bpf_map_area_alloc(st_map->links_cnt * sizeof(struct bpf_link *),
				   NUMA_NO_NODE);
	if (!st_map->uvalue || !st_map->links) {
		ret = -ENOMEM;
		goto errout_free;
	}
	st_map->btf = btf;

	mutex_init(&st_map->lock);
	bpf_map_init_from_attr(map, attr);

	return map;

errout_free:
	__bpf_struct_ops_map_free(map);
errout:
	module_put(mod);

	return ERR_PTR(ret);
}

static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
	const struct btf_type *vt = st_ops_desc->value_type;
	u64 usage;

	usage = sizeof(*st_map) +
			vt->size - sizeof(struct bpf_struct_ops_value);
	usage += vt->size;
	usage += btf_type_vlen(vt) * sizeof(struct bpf_link *);
	usage += PAGE_SIZE;
	return usage;
}

BTF_ID_LIST_SINGLE(bpf_struct_ops_map_btf_ids, struct, bpf_struct_ops_map)
const struct bpf_map_ops bpf_struct_ops_map_ops = {
	.map_alloc_check = bpf_struct_ops_map_alloc_check,
	.map_alloc = bpf_struct_ops_map_alloc,
	.map_free = bpf_struct_ops_map_free,
	.map_get_next_key = bpf_struct_ops_map_get_next_key,
	.map_lookup_elem = bpf_struct_ops_map_lookup_elem,
	.map_delete_elem = bpf_struct_ops_map_delete_elem,
	.map_update_elem = bpf_struct_ops_map_update_elem,
	.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
	.map_mem_usage = bpf_struct_ops_map_mem_usage,
	.map_btf_id = &bpf_struct_ops_map_btf_ids[0],
};

/* "const void *" because some subsystem is
 * passing a const (e.g. const struct tcp_congestion_ops *)
 */
bool bpf_struct_ops_get(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;
	struct bpf_struct_ops_map *st_map;
	struct bpf_map *map;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);

	map = __bpf_map_inc_not_zero(&st_map->map, false);
	return !IS_ERR(map);
}

void bpf_struct_ops_put(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;
	struct bpf_struct_ops_map *st_map;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);

	bpf_map_put(&st_map->map);
}
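
/* Subsystem-side usage sketch (illustrative, not from this file): a
 * subsystem that keeps the registered struct_ops in use beyond a single
 * call pins/unpins the backing map around that use, e.g.:
 *
 *	// ca: const struct tcp_congestion_ops * from this struct_ops
 *	if (!bpf_struct_ops_get(ca))
 *		return -EBUSY;
 *	...
 *	bpf_struct_ops_put(ca);
 */
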
static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	return map->map_type == BPF_MAP_TYPE_STRUCT_OPS &&
		map->map_flags & BPF_F_LINK &&
		/* Pair with smp_store_release() during map_update */
		smp_load_acquire(&st_map->kvalue.common.state) == BPF_STRUCT_OPS_STATE_READY;
}

static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_struct_ops_map *st_map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	st_map = (struct bpf_struct_ops_map *)
		rcu_dereference_protected(st_link->map, true);
	/* st_link->map can be NULL if
	 * bpf_struct_ops_link_create() fails to register.
	 */
	if (st_map) {
		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data);
		bpf_map_put(&st_map->map);
	}
	kfree(st_link);
}

static void bpf_struct_ops_map_link_show_fdinfo(const struct bpf_link *link,
						struct seq_file *seq)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_map *map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	rcu_read_lock();
	map = rcu_dereference(st_link->map);
	seq_printf(seq, "map_id:\t%d\n", map->id);
	rcu_read_unlock();
}

static int bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link,
						  struct bpf_link_info *info)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_map *map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	rcu_read_lock();
	map = rcu_dereference(st_link->map);
	info->struct_ops.map_id = map->id;
	rcu_read_unlock();
	return 0;
}
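
/* Userspace sketch of the update path below, assuming libbpf (treat the
 * opts field and flag names as illustrative):
 *
 *	LIBBPF_OPTS(bpf_link_update_opts, opts,
 *		    .flags = BPF_F_REPLACE,
 *		    .old_map_fd = old_map_fd);
 *
 *	err = bpf_link_update(link_fd, new_map_fd, &opts);
 *
 * The old map fd is optional; when given, the update is rejected with
 * -EPERM if the link no longer points at that map.
 */
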
static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map *new_map,
					  struct bpf_map *expected_old_map)
{
	struct bpf_struct_ops_map *st_map, *old_st_map;
	struct bpf_map *old_map;
	struct bpf_struct_ops_link *st_link;
	int err;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	st_map = container_of(new_map, struct bpf_struct_ops_map, map);

	if (!bpf_struct_ops_valid_to_reg(new_map))
		return -EINVAL;

	if (!st_map->st_ops_desc->st_ops->update)
		return -EOPNOTSUPP;

	mutex_lock(&update_mutex);

	old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
	if (expected_old_map && old_map != expected_old_map) {
		err = -EPERM;
		goto err_out;
	}

	old_st_map = container_of(old_map, struct bpf_struct_ops_map, map);
	/* The new and old struct_ops must be the same type. */
	if (st_map->st_ops_desc != old_st_map->st_ops_desc) {
		err = -EINVAL;
		goto err_out;
	}

	err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data);
	if (err)
		goto err_out;

	bpf_map_inc(new_map);
	rcu_assign_pointer(st_link->map, new_map);
	bpf_map_put(old_map);

err_out:
	mutex_unlock(&update_mutex);

	return err;
}

static const struct bpf_link_ops bpf_struct_ops_map_lops = {
	.dealloc = bpf_struct_ops_map_link_dealloc,
	.show_fdinfo = bpf_struct_ops_map_link_show_fdinfo,
	.fill_link_info = bpf_struct_ops_map_link_fill_link_info,
	.update_map = bpf_struct_ops_map_link_update,
};

int bpf_struct_ops_link_create(union bpf_attr *attr)
{
	struct bpf_struct_ops_link *link = NULL;
	struct bpf_link_primer link_primer;
	struct bpf_struct_ops_map *st_map;
	struct bpf_map *map;
	int err;

	map = bpf_map_get(attr->link_create.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	st_map = (struct bpf_struct_ops_map *)map;

	if (!bpf_struct_ops_valid_to_reg(map)) {
		err = -EINVAL;
		goto err_out;
	}

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto err_out;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_map_lops, NULL);

	err = bpf_link_prime(&link->link, &link_primer);
	if (err)
		goto err_out;

	err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data);
	if (err) {
		bpf_link_cleanup(&link_primer);
		link = NULL;
		goto err_out;
	}
	RCU_INIT_POINTER(link->map, map);

	return bpf_link_settle(&link_primer);

err_out:
	bpf_map_put(map);
	kfree(link);
	return err;
}

void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	info->btf_vmlinux_id = btf_obj_id(st_map->btf);
}