/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2021 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/sglist.h>
#include <sys/sleepqueue.h>
#include <sys/refcount.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/rwlock.h>
#include <sys/mman.h>
#include <sys/stack.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/stdarg.h>

#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#endif

#include <linux/kobject.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/poll.h>
#include <linux/smp.h>
#include <linux/wait_bit.h>
#include <linux/rcupdate.h>
#include <linux/interval_tree.h>
#include <linux/interval_tree_generic.h>

#if defined(__i386__) || defined(__amd64__)
#include <asm/smp.h>
#endif

SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "LinuxKPI parameters");

int linuxkpi_debug;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN,
    &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable.");
int linuxkpi_warn_dump_stack = 0;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, warn_dump_stack, CTLFLAG_RWTUN,
    &linuxkpi_warn_dump_stack, 0,
    "Set to enable stack traces from WARN_ON(). Clear to disable.");

static struct timeval lkpi_net_lastlog;
static int lkpi_net_curpps;
static int lkpi_net_maxpps = 99;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, net_ratelimit, CTLFLAG_RWTUN,
    &lkpi_net_maxpps, 0, "Limit number of LinuxKPI net messages per second.");

MALLOC_DEFINE(M_KMALLOC, "lkpikmalloc", "Linux kmalloc compat");

#include <linux/rbtree.h>
/* Undo Linux compat changes. */
#undef RB_ROOT
#undef file
#undef cdev
#define	RB_ROOT(head)	(head)->rbh_root

static void linux_destroy_dev(struct linux_cdev *);
static void linux_cdev_deref(struct linux_cdev *ldev);
static struct vm_area_struct *linux_cdev_handle_find(void *handle);

cpumask_t cpu_online_mask;
struct kobject linux_class_root;
struct device linux_root_device;
struct class linux_class_misc;
struct list_head pci_drivers;
struct list_head pci_devices;
spinlock_t pci_lock;

unsigned long linux_timer_hz_mask;

wait_queue_head_t linux_bit_waitq;
wait_queue_head_t linux_var_waitq;

int
panic_cmp(struct rb_node *one, struct rb_node *two)
{
	panic("no cmp");
}

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

#define	START(node)	((node)->start)
#define	LAST(node)	((node)->last)

INTERVAL_TREE_DEFINE(struct interval_tree_node, rb, unsigned long,, START,
    LAST,, lkpi_interval_tree)

struct kobject *
kobject_create(void)
{
	struct kobject *kobj;

	kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
	if (kobj == NULL)
		return (NULL);
	kobject_init(kobj, &linux_kfree_type);

	return (kobj);
}

int
kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args)
{
	va_list tmp_va;
	int len;
	char *old;
	char *name;
	char dummy;

	old = kobj->name;

	if (old && fmt == NULL)
		return (0);

	/* compute length of string */
	va_copy(tmp_va, args);
	len = vsnprintf(&dummy, 0, fmt, tmp_va);
	va_end(tmp_va);

	/* account for zero termination */
	len++;

	/* check for error */
	if (len < 1)
		return (-EINVAL);

	/* allocate memory for string */
	name = kzalloc(len, GFP_KERNEL);
	if (name == NULL)
		return (-ENOMEM);
	vsnprintf(name, len, fmt, args);
	kobj->name = name;

	/* free old string */
	kfree(old);

	/* filter new string */
	for (; *name != '\0'; name++)
		if (*name == '/')
			*name = '!';
	return (0);
}

int
kobject_set_name(struct kobject *kobj, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);

	return (error);
}
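
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * kobjects are named with a printf-style format, and any '/' in the
 * result is rewritten to '!' above, since '/' cannot appear in a
 * sysfs directory name:
 *
 *	error = kobject_set_name(kobj, "card%d/eq%d", card, eq);
 *	// "card0/eq1" would be stored as "card0!eq1"
 */
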
static int
kobject_add_complete(struct kobject *kobj, struct kobject *parent)
{
	const struct kobj_type *t;
	int error;

	kobj->parent = parent;
	error = sysfs_create_dir(kobj);
	if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
		struct attribute **attr;
		t = kobj->ktype;

		for (attr = t->default_attrs; *attr != NULL; attr++) {
			error = sysfs_create_file(kobj, *attr);
			if (error)
				break;
		}
		if (error)
			sysfs_remove_dir(kobj);
	}
	return (error);
}

int
kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);

	return kobject_add_complete(kobj, parent);
}

void
linux_kobject_release(struct kref *kref)
{
	struct kobject *kobj;
	char *name;

	kobj = container_of(kref, struct kobject, kref);
	sysfs_remove_dir(kobj);
	name = kobj->name;
	if (kobj->ktype && kobj->ktype->release)
		kobj->ktype->release(kobj);
	kfree(name);
}

static void
linux_kobject_kfree(struct kobject *kobj)
{
	kfree(kobj);
}

static void
linux_kobject_kfree_name(struct kobject *kobj)
{
	if (kobj) {
		kfree(kobj->name);
	}
}

const struct kobj_type linux_kfree_type = {
	.release = linux_kobject_kfree
};

static ssize_t
lkpi_kobj_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct kobj_attribute *ka =
	    container_of(attr, struct kobj_attribute, attr);

	if (ka->show == NULL)
		return (-EIO);

	return (ka->show(kobj, ka, buf));
}

static ssize_t
lkpi_kobj_attr_store(struct kobject *kobj, struct attribute *attr,
    const char *buf, size_t count)
{
	struct kobj_attribute *ka =
	    container_of(attr, struct kobj_attribute, attr);

	if (ka->store == NULL)
		return (-EIO);

	return (ka->store(kobj, ka, buf, count));
}

const struct sysfs_ops kobj_sysfs_ops = {
	.show	= lkpi_kobj_attr_show,
	.store	= lkpi_kobj_attr_store,
};

static void
linux_device_release(struct device *dev)
{
	pr_debug("linux_device_release: %s\n", dev_name(dev));
	kfree(dev);
}

static ssize_t
linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct class, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct class, kobj),
		    dattr, buf, count);
	return (error);
}

static void
linux_class_release(struct kobject *kobj)
{
	struct class *class;

	class = container_of(kobj, struct class, kobj);
	if (class->class_release)
		class->class_release(class);
}
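
/*
 * Illustrative sketch: kobj_sysfs_ops above dispatches sysfs reads and
 * writes to the show/store members of a struct kobj_attribute.  A
 * hypothetical consumer would look like:
 *
 *	static ssize_t
 *	foo_show(struct kobject *kobj, struct kobj_attribute *attr,
 *	    char *buf)
 *	{
 *		return (snprintf(buf, PAGE_SIZE, "%d\n", 42));
 *	}
 *	static struct kobj_attribute foo_attr =
 *	    __ATTR(foo, 0444, foo_show, NULL);
 */
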
static const struct sysfs_ops linux_class_sysfs = {
	.show  = linux_class_show,
	.store = linux_class_store,
};

const struct kobj_type linux_class_ktype = {
	.release = linux_class_release,
	.sysfs_ops = &linux_class_sysfs
};

static void
linux_dev_release(struct kobject *kobj)
{
	struct device *dev;

	dev = container_of(kobj, struct device, kobj);
	/* This is the precedence defined by Linux. */
	if (dev->release)
		dev->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
}

static ssize_t
linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct device, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct device, kobj),
		    dattr, buf, count);
	return (error);
}

static const struct sysfs_ops linux_dev_sysfs = {
	.show  = linux_dev_show,
	.store = linux_dev_store,
};

const struct kobj_type linux_dev_ktype = {
	.release = linux_dev_release,
	.sysfs_ops = &linux_dev_sysfs
};

struct device *
device_create(struct class *class, struct device *parent, dev_t devt,
    void *drvdata, const char *fmt, ...)
{
	struct device *dev;
	va_list args;

	dev = kzalloc(sizeof(*dev), M_WAITOK);
	dev->parent = parent;
	dev->class = class;
	dev->devt = devt;
	dev->driver_data = drvdata;
	dev->release = linux_device_release;
	va_start(args, fmt);
	kobject_set_name_vargs(&dev->kobj, fmt, args);
	va_end(args);
	device_register(dev);

	return (dev);
}
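
/*
 * Illustrative sketch of a typical caller (names are hypothetical):
 *
 *	sc->dev = device_create(&my_class, NULL, MKDEV(my_major, 0),
 *	    sc, "mydev%d", unit);
 *
 * The device is registered immediately; the matching teardown is
 * device_destroy() with the same class and devt.
 */
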
struct device *
device_create_groups_vargs(struct class *class, struct device *parent,
    dev_t devt, void *drvdata, const struct attribute_group **groups,
    const char *fmt, va_list args)
{
	struct device *dev = NULL;
	int retval = -ENODEV;

	if (class == NULL || IS_ERR(class))
		goto error;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto error;
	}

	dev->devt = devt;
	dev->class = class;
	dev->parent = parent;
	dev->groups = groups;
	dev->release = device_create_release;
	/* device_initialize() needs the class and parent to be set */
	device_initialize(dev);
	dev_set_drvdata(dev, drvdata);

	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
	if (retval)
		goto error;

	retval = device_add(dev);
	if (retval)
		goto error;

	return dev;

error:
	put_device(dev);
	return ERR_PTR(retval);
}

struct class *
class_create(struct module *owner, const char *name)
{
	struct class *class;
	int error;

	class = kzalloc(sizeof(*class), M_WAITOK);
	class->owner = owner;
	class->name = name;
	class->class_release = linux_class_kfree;
	error = class_register(class);
	if (error) {
		kfree(class);
		return (NULL);
	}

	return (class);
}

int
kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
    struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	kobject_init(kobj, ktype);
	kobj->ktype = ktype;
	kobj->parent = parent;
	kobj->name = NULL;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);
	return kobject_add_complete(kobj, parent);
}
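
/*
 * Illustrative sketch: embedded kobjects are typically initialized and
 * published in one call, and torn down with kobject_put(), which ends
 * up in the ktype's release method (hypothetical names):
 *
 *	error = kobject_init_and_add(&sc->kobj, &my_ktype,
 *	    &parent->kobj, "port%d", port);
 *	...
 *	kobject_put(&sc->kobj);
 */
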
static void
linux_kq_lock(void *arg)
{
	spinlock_t *s = arg;

	spin_lock(s);
}
static void
linux_kq_unlock(void *arg)
{
	spinlock_t *s = arg;

	spin_unlock(s);
}

static void
linux_kq_assert_lock(void *arg, int what)
{
#ifdef INVARIANTS
	spinlock_t *s = arg;

	if (what == LA_LOCKED)
		mtx_assert(&s->m, MA_OWNED);
	else
		mtx_assert(&s->m, MA_NOTOWNED);
#endif
}

static void
linux_file_kqfilter_poll(struct linux_file *, int);

struct linux_file *
linux_file_alloc(void)
{
	struct linux_file *filp;

	filp = kzalloc(sizeof(*filp), GFP_KERNEL);

	/* set initial refcount */
	filp->f_count = 1;

	/* setup fields needed by kqueue support */
	spin_lock_init(&filp->f_kqlock);
	knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock,
	    linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock);

	return (filp);
}

void
linux_file_free(struct linux_file *filp)
{
	if (filp->_file == NULL) {
		if (filp->f_op != NULL && filp->f_op->release != NULL)
			filp->f_op->release(filp->f_vnode, filp);
		if (filp->f_shmem != NULL)
			vm_object_deallocate(filp->f_shmem);
		kfree_rcu(filp, rcu);
	} else {
		/*
		 * The close method of the character device or file
		 * will free the linux_file structure:
		 */
		_fdrop(filp->_file, curthread);
	}
}

struct linux_cdev *
cdev_alloc(void)
{
	struct linux_cdev *cdev;

	cdev = kzalloc(sizeof(struct linux_cdev), M_WAITOK);
	kobject_init(&cdev->kobj, &linux_cdev_ktype);
	cdev->refs = 1;
	return (cdev);
}

static int
linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	struct vm_area_struct *vmap;

	vmap = linux_cdev_handle_find(vm_obj->handle);

	MPASS(vmap != NULL);
	MPASS(vmap->vm_private_data == vm_obj->handle);

	if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) {
		vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset;
		vm_page_t page;

		if (((*mres)->flags & PG_FICTITIOUS) != 0) {
			/*
			 * If the passed in result page is a fake
			 * page, update it with the new physical
			 * address.
			 */
			page = *mres;
			vm_page_updatefake(page, paddr, vm_obj->memattr);
		} else {
			/*
			 * Replace the passed in "mres" page with our
			 * own fake page and free up all of the
			 * original pages.
			 */
			VM_OBJECT_WUNLOCK(vm_obj);
			page = vm_page_getfake(paddr, vm_obj->memattr);
			VM_OBJECT_WLOCK(vm_obj);

			vm_page_replace(page, vm_obj, (*mres)->pindex, *mres);
			*mres = page;
		}
		vm_page_valid(page);
		return (VM_PAGER_OK);
	}
	return (VM_PAGER_FAIL);
}
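
/*
 * Illustrative sketch (hypothetical driver code): the populate path
 * below drives a Linux-style vm_operations_struct fault handler that
 * inserts PFNs itself, e.g.:
 *
 *	static vm_fault_t
 *	mydrv_fault(struct vm_fault *vmf)
 *	{
 *		// mydrv_pfn() is a hypothetical helper
 *		return (vmf_insert_pfn(vmf->vma,
 *		    (unsigned long)vmf->virtual_address, mydrv_pfn(vmf)));
 *	}
 *
 * vm_pfn_first/vm_pfn_count are filled in as a side effect of the
 * insertion, which is how *first and *last are computed below.
 */
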
644 */ 645 VM_OBJECT_WUNLOCK(vm_obj); 646 page = vm_page_getfake(paddr, vm_obj->memattr); 647 VM_OBJECT_WLOCK(vm_obj); 648 649 vm_page_replace(page, vm_obj, (*mres)->pindex, *mres); 650 *mres = page; 651 } 652 vm_page_valid(page); 653 return (VM_PAGER_OK); 654 } 655 return (VM_PAGER_FAIL); 656 } 657 658 static int 659 linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, 660 vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) 661 { 662 struct vm_area_struct *vmap; 663 int err; 664 665 /* get VM area structure */ 666 vmap = linux_cdev_handle_find(vm_obj->handle); 667 MPASS(vmap != NULL); 668 MPASS(vmap->vm_private_data == vm_obj->handle); 669 670 VM_OBJECT_WUNLOCK(vm_obj); 671 672 linux_set_current(curthread); 673 674 down_write(&vmap->vm_mm->mmap_sem); 675 if (unlikely(vmap->vm_ops == NULL)) { 676 err = VM_FAULT_SIGBUS; 677 } else { 678 struct vm_fault vmf; 679 680 /* fill out VM fault structure */ 681 vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx); 682 vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0; 683 vmf.pgoff = 0; 684 vmf.page = NULL; 685 vmf.vma = vmap; 686 687 vmap->vm_pfn_count = 0; 688 vmap->vm_pfn_pcount = &vmap->vm_pfn_count; 689 vmap->vm_obj = vm_obj; 690 691 err = vmap->vm_ops->fault(&vmf); 692 693 while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { 694 kern_yield(PRI_USER); 695 err = vmap->vm_ops->fault(&vmf); 696 } 697 } 698 699 /* translate return code */ 700 switch (err) { 701 case VM_FAULT_OOM: 702 err = VM_PAGER_AGAIN; 703 break; 704 case VM_FAULT_SIGBUS: 705 err = VM_PAGER_BAD; 706 break; 707 case VM_FAULT_NOPAGE: 708 /* 709 * By contract the fault handler will return having 710 * busied all the pages itself. If pidx is already 711 * found in the object, it will simply xbusy the first 712 * page and return with vm_pfn_count set to 1. 
713 */ 714 *first = vmap->vm_pfn_first; 715 *last = *first + vmap->vm_pfn_count - 1; 716 err = VM_PAGER_OK; 717 break; 718 default: 719 err = VM_PAGER_ERROR; 720 break; 721 } 722 up_write(&vmap->vm_mm->mmap_sem); 723 VM_OBJECT_WLOCK(vm_obj); 724 return (err); 725 } 726 727 static struct rwlock linux_vma_lock; 728 static TAILQ_HEAD(, vm_area_struct) linux_vma_head = 729 TAILQ_HEAD_INITIALIZER(linux_vma_head); 730 731 static void 732 linux_cdev_handle_free(struct vm_area_struct *vmap) 733 { 734 /* Drop reference on vm_file */ 735 if (vmap->vm_file != NULL) 736 fput(vmap->vm_file); 737 738 /* Drop reference on mm_struct */ 739 mmput(vmap->vm_mm); 740 741 kfree(vmap); 742 } 743 744 static void 745 linux_cdev_handle_remove(struct vm_area_struct *vmap) 746 { 747 rw_wlock(&linux_vma_lock); 748 TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); 749 rw_wunlock(&linux_vma_lock); 750 } 751 752 static struct vm_area_struct * 753 linux_cdev_handle_find(void *handle) 754 { 755 struct vm_area_struct *vmap; 756 757 rw_rlock(&linux_vma_lock); 758 TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { 759 if (vmap->vm_private_data == handle) 760 break; 761 } 762 rw_runlock(&linux_vma_lock); 763 return (vmap); 764 } 765 766 static int 767 linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 768 vm_ooffset_t foff, struct ucred *cred, u_short *color) 769 { 770 771 MPASS(linux_cdev_handle_find(handle) != NULL); 772 *color = 0; 773 return (0); 774 } 775 776 static void 777 linux_cdev_pager_dtor(void *handle) 778 { 779 const struct vm_operations_struct *vm_ops; 780 struct vm_area_struct *vmap; 781 782 vmap = linux_cdev_handle_find(handle); 783 MPASS(vmap != NULL); 784 785 /* 786 * Remove handle before calling close operation to prevent 787 * other threads from reusing the handle pointer. 
int
zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
    unsigned long size)
{
	vm_object_t obj;
	vm_page_t m;

	obj = vma->vm_obj;
	if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0)
		return (-ENOTSUP);
	VM_OBJECT_RLOCK(obj);
	for (m = vm_page_find_least(obj, OFF_TO_IDX(address));
	    m != NULL && m->pindex < OFF_TO_IDX(address + size);
	    m = TAILQ_NEXT(m, listq))
		pmap_remove_all(m);
	VM_OBJECT_RUNLOCK(obj);
	return (0);
}

static struct file_operations dummy_ldev_ops = {
	/* XXXKIB */
};

static struct linux_cdev dummy_ldev = {
	.ops = &dummy_ldev_ops,
};

#define	LDEV_SI_DTR	0x0001
#define	LDEV_SI_REF	0x0002

static void
linux_get_fop(struct linux_file *filp, const struct file_operations **fop,
    struct linux_cdev **dev)
{
	struct linux_cdev *ldev;
	u_int siref;

	ldev = filp->f_cdev;
	*fop = filp->f_op;
	if (ldev != NULL) {
		if (ldev->kobj.ktype == &linux_cdev_static_ktype) {
			refcount_acquire(&ldev->refs);
		} else {
			for (siref = ldev->siref;;) {
				if ((siref & LDEV_SI_DTR) != 0) {
					ldev = &dummy_ldev;
					*fop = ldev->ops;
					siref = ldev->siref;
					MPASS((ldev->siref & LDEV_SI_DTR) == 0);
				} else if (atomic_fcmpset_int(&ldev->siref,
				    &siref, siref + LDEV_SI_REF)) {
					break;
				}
			}
		}
	}
	*dev = ldev;
}

static void
linux_drop_fop(struct linux_cdev *ldev)
{

	if (ldev == NULL)
		return;
	if (ldev->kobj.ktype == &linux_cdev_static_ktype) {
		linux_cdev_deref(ldev);
	} else {
		MPASS(ldev->kobj.ktype == &linux_cdev_ktype);
		MPASS((ldev->siref & ~LDEV_SI_DTR) != 0);
		atomic_subtract_int(&ldev->siref, LDEV_SI_REF);
	}
}

#define	OPW(fp,td,code) ({			\
	struct file *__fpop;			\
	__typeof(code) __retval;		\
						\
	__fpop = (td)->td_fpop;			\
	(td)->td_fpop = (fp);			\
	__retval = (code);			\
	(td)->td_fpop = __fpop;			\
	__retval;				\
})

static int
linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td,
    struct file *file)
{
	struct linux_cdev *ldev;
	struct linux_file *filp;
	const struct file_operations *fop;
	int error;

	ldev = dev->si_drv1;

	filp = linux_file_alloc();
	filp->f_dentry = &filp->f_dentry_store;
	filp->f_op = ldev->ops;
	filp->f_mode = file->f_flag;
	filp->f_flags = file->f_flag;
	filp->f_vnode = file->f_vnode;
	filp->_file = file;
	refcount_acquire(&ldev->refs);
	filp->f_cdev = ldev;

	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);

	if (fop->open != NULL) {
		error = -fop->open(file->f_vnode, filp);
		if (error != 0) {
			linux_drop_fop(ldev);
			linux_cdev_deref(filp->f_cdev);
			kfree(filp);
			return (error);
		}
	}

	/* hold on to the vnode - used for fstat() */
	vhold(filp->f_vnode);

	/* release the file from devfs */
	finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops);
	linux_drop_fop(ldev);
	return (ENXIO);
}
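
/*
 * The ioctl path below hands Linux driver code a fake "user-space"
 * pointer in [LINUX_IOCTL_MIN_PTR, LINUX_IOCTL_MAX_PTR) and relies on
 * linux_remap_address() to translate it back to the kernel buffer that
 * the FreeBSD ioctl machinery already copied in.  Worked example,
 * assuming a 64-byte ioctl argument:
 *
 *	data = (void *)LINUX_IOCTL_MIN_PTR;	// handed to the driver
 *	// driver calls copy_from_user(dst, data + 8, 16):
 *	// uaddr_val = (MIN_PTR + 8) - MIN_PTR = 8
 *	// 8 + 16 <= bsd_ioctl_len (64), so the copy resolves to
 *	// bsd_ioctl_data + 8
 */
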
#define	LINUX_IOCTL_MIN_PTR 0x10000UL
#define	LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX)

static inline int
linux_remap_address(void **uaddr, size_t len)
{
	uintptr_t uaddr_val = (uintptr_t)(*uaddr);

	if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR &&
	    uaddr_val < LINUX_IOCTL_MAX_PTR)) {
		struct task_struct *pts = current;
		if (pts == NULL) {
			*uaddr = NULL;
			return (1);
		}

		/* compute data offset */
		uaddr_val -= LINUX_IOCTL_MIN_PTR;

		/* check that length is within bounds */
		if ((len > IOCPARM_MAX) ||
		    (uaddr_val + len) > pts->bsd_ioctl_len) {
			*uaddr = NULL;
			return (1);
		}

		/* re-add kernel buffer address */
		uaddr_val += (uintptr_t)pts->bsd_ioctl_data;

		/* update address location */
		*uaddr = (void *)uaddr_val;
		return (1);
	}
	return (0);
}

int
linux_copyin(const void *uaddr, void *kaddr, size_t len)
{
	if (linux_remap_address(__DECONST(void **, &uaddr), len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(kaddr, uaddr, len);
		return (0);
	}
	return (-copyin(uaddr, kaddr, len));
}

int
linux_copyout(const void *kaddr, void *uaddr, size_t len)
{
	if (linux_remap_address(&uaddr, len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(uaddr, kaddr, len);
		return (0);
	}
	return (-copyout(kaddr, uaddr, len));
}

size_t
linux_clear_user(void *_uaddr, size_t _len)
{
	uint8_t *uaddr = _uaddr;
	size_t len = _len;

	/* make sure uaddr is aligned before going into the fast loop */
	while (((uintptr_t)uaddr & 7) != 0 && len > 7) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}

	/* zero 8 bytes at a time */
	while (len > 7) {
#ifdef __LP64__
		if (suword64(uaddr, 0))
			return (_len);
#else
		if (suword32(uaddr, 0))
			return (_len);
		if (suword32(uaddr + 4, 0))
			return (_len);
#endif
		uaddr += 8;
		len -= 8;
	}

	/* zero fill end, if any */
	while (len > 0) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}
	return (0);
}

int
linux_access_ok(const void *uaddr, size_t len)
{
	uintptr_t saddr;
	uintptr_t eaddr;

	/* get start and end address */
	saddr = (uintptr_t)uaddr;
	eaddr = (uintptr_t)uaddr + len;

	/* verify addresses are valid for userspace */
	return ((saddr == eaddr) ||
	    (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS));
}

/*
 * This function should return either EINTR or ERESTART depending on
 * the signal type sent to this thread:
 */
static int
linux_get_error(struct task_struct *task, int error)
{
	/* check for signal type interrupt code */
	if (error == EINTR || error == ERESTARTSYS || error == ERESTART) {
		error = -linux_schedule_get_interrupt_value(task);
		if (error == 0)
			error = EINTR;
	}
	return (error);
}
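
/*
 * Illustrative sketch of the consumer side (hypothetical driver): the
 * unlocked_ioctl handler invoked below receives the fake pointer
 * installed by linux_file_ioctl_sub() and uses the copy helpers above,
 * which detect and undo the remapping:
 *
 *	static long
 *	mydrv_ioctl(struct linux_file *filp, unsigned int cmd,
 *	    unsigned long arg)
 *	{
 *		struct mydrv_args a;
 *
 *		if (copy_from_user(&a, (void *)arg, sizeof(a)) != 0)
 *			return (-EFAULT);
 *		...
 *	}
 */
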
static int
linux_file_ioctl_sub(struct file *fp, struct linux_file *filp,
    const struct file_operations *fop, u_long cmd, caddr_t data,
    struct thread *td)
{
	struct task_struct *task = current;
	unsigned size;
	int error;

	size = IOCPARM_LEN(cmd);
	/* refer to logic in sys_ioctl() */
	if (size > 0) {
		/*
		 * Setup hint for linux_copyin() and linux_copyout().
		 *
		 * Background: Linux code expects a user-space address
		 * while FreeBSD supplies a kernel-space address.
		 */
		task->bsd_ioctl_data = data;
		task->bsd_ioctl_len = size;
		data = (void *)LINUX_IOCTL_MIN_PTR;
	} else {
		/* fetch user-space pointer */
		data = *(void **)data;
	}
#ifdef COMPAT_FREEBSD32
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		/* try the compat IOCTL handler first */
		if (fop->compat_ioctl != NULL) {
			error = -OPW(fp, td, fop->compat_ioctl(filp,
			    cmd, (u_long)data));
		} else {
			error = ENOTTY;
		}

		/* fallback to the regular IOCTL handler, if any */
		if (error == ENOTTY && fop->unlocked_ioctl != NULL) {
			error = -OPW(fp, td, fop->unlocked_ioctl(filp,
			    cmd, (u_long)data));
		}
	} else
#endif
	{
		if (fop->unlocked_ioctl != NULL) {
			error = -OPW(fp, td, fop->unlocked_ioctl(filp,
			    cmd, (u_long)data));
		} else {
			error = ENOTTY;
		}
	}
	if (size > 0) {
		task->bsd_ioctl_data = NULL;
		task->bsd_ioctl_len = 0;
	}

	if (error == EWOULDBLOCK) {
		/* update kqfilter status, if any */
		linux_file_kqfilter_poll(filp,
		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
	} else {
		error = linux_get_error(task, error);
	}
	return (error);
}

#define	LINUX_POLL_TABLE_NORMAL ((poll_table *)1)

/*
 * This function atomically updates the poll wakeup state and returns
 * the previous state at the time of update.
 */
static uint8_t
linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate)
{
	int c, old;

	c = v->counter;

	while ((old = atomic_cmpxchg(v, c, pstate[c])) != c)
		c = old;

	return (c);
}

static int
linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT,	/* NOP */
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY,
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY,	/* NOP */
	};
	struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_QUEUED:
		linux_poll_wakeup(filp);
		return (1);
	default:
		return (0);
	}
}

void
linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY,
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED,	/* NOP */
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED,
	};

	/* check if we are called inside the select system call */
	if (p == LINUX_POLL_TABLE_NORMAL)
		selrecord(curthread, &filp->f_selinfo);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_INIT:
		/* NOTE: file handles can only belong to one wait-queue */
		filp->f_wait_queue.wqh = wqh;
		filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback;
		add_wait_queue(wqh, &filp->f_wait_queue.wq);
		atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED);
		break;
	default:
		break;
	}
}
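
/*
 * Summary of the wait-queue state machine driven by the transition
 * tables above (derived from the code; INIT is the idle state):
 *
 *	poll_wait:  INIT  -> QUEUED   (entry added to the wait-queue)
 *	poll_wait:  READY -> QUEUED   (re-armed, already on the queue)
 *	wakeup:     QUEUED -> READY   (linux_poll_wakeup() called once)
 *	dequeue:    any   -> INIT     (entry removed if it was queued)
 */
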
static void
linux_poll_wait_dequeue(struct linux_file *filp)
{
	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT,	/* NOP */
		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT,
		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT,
		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT,
	};

	seldrain(&filp->f_selinfo);

	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
	case LINUX_FWQ_STATE_NOT_READY:
	case LINUX_FWQ_STATE_QUEUED:
	case LINUX_FWQ_STATE_READY:
		remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq);
		break;
	default:
		break;
	}
}

void
linux_poll_wakeup(struct linux_file *filp)
{
	/* this function should be NULL-safe */
	if (filp == NULL)
		return;

	selwakeup(&filp->f_selinfo);

	spin_lock(&filp->f_kqlock);
	filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ |
	    LINUX_KQ_FLAG_NEED_WRITE;

	/* make sure the "knote" gets woken up */
	KNOTE_LOCKED(&filp->f_selinfo.si_note, 1);
	spin_unlock(&filp->f_kqlock);
}

static void
linux_file_kqfilter_detach(struct knote *kn)
{
	struct linux_file *filp = kn->kn_hook;

	spin_lock(&filp->f_kqlock);
	knlist_remove(&filp->f_selinfo.si_note, kn, 1);
	spin_unlock(&filp->f_kqlock);
}

static int
linux_file_kqfilter_read_event(struct knote *kn, long hint)
{
	struct linux_file *filp = kn->kn_hook;

	mtx_assert(&filp->f_kqlock.m, MA_OWNED);

	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0);
}

static int
linux_file_kqfilter_write_event(struct knote *kn, long hint)
{
	struct linux_file *filp = kn->kn_hook;

	mtx_assert(&filp->f_kqlock.m, MA_OWNED);

	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0);
}
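
/*
 * Note: a kevent(2) EVFILT_READ or EVFILT_WRITE registration on a
 * LinuxKPI file maps onto the driver's poll() method; POLLIN sets
 * LINUX_KQ_FLAG_NEED_READ and POLLOUT sets LINUX_KQ_FLAG_NEED_WRITE in
 * linux_file_kqfilter_poll() below, which is what the event functions
 * above report.
 */
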
static struct filterops linux_dev_kqfiltops_read = {
	.f_isfd = 1,
	.f_detach = linux_file_kqfilter_detach,
	.f_event = linux_file_kqfilter_read_event,
};

static struct filterops linux_dev_kqfiltops_write = {
	.f_isfd = 1,
	.f_detach = linux_file_kqfilter_detach,
	.f_event = linux_file_kqfilter_write_event,
};

static void
linux_file_kqfilter_poll(struct linux_file *filp, int kqflags)
{
	struct thread *td;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int temp;

	if ((filp->f_kqflags & kqflags) == 0)
		return;

	td = curthread;

	linux_get_fop(filp, &fop, &ldev);
	/* get the latest polling state */
	temp = OPW(filp->_file, td, fop->poll(filp, NULL));
	linux_drop_fop(ldev);

	spin_lock(&filp->f_kqlock);
	/* clear kqflags */
	filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ |
	    LINUX_KQ_FLAG_NEED_WRITE);
	/* update kqflags */
	if ((temp & (POLLIN | POLLOUT)) != 0) {
		if ((temp & POLLIN) != 0)
			filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ;
		if ((temp & POLLOUT) != 0)
			filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE;

		/* make sure the "knote" gets woken up */
		KNOTE_LOCKED(&filp->f_selinfo.si_note, 0);
	}
	spin_unlock(&filp->f_kqlock);
}

static int
linux_file_kqfilter(struct file *file, struct knote *kn)
{
	struct linux_file *filp;
	struct thread *td;
	int error;

	td = curthread;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	if (filp->f_op->poll == NULL)
		return (EINVAL);

	spin_lock(&filp->f_kqlock);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ;
		kn->kn_fop = &linux_dev_kqfiltops_read;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		error = 0;
		break;
	case EVFILT_WRITE:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE;
		kn->kn_fop = &linux_dev_kqfiltops_write;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}
	spin_unlock(&filp->f_kqlock);

	if (error == 0) {
		linux_set_current(td);

		/* update kqfilter status, if any */
		linux_file_kqfilter_poll(filp,
		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
	}
	return (error);
}

static int
linux_file_mmap_single(struct file *fp, const struct file_operations *fop,
    vm_ooffset_t *offset, vm_size_t size, struct vm_object **object,
    int nprot, bool is_shared, struct thread *td)
{
	struct task_struct *task;
	struct vm_area_struct *vmap;
	struct mm_struct *mm;
	struct linux_file *filp;
	vm_memattr_t attr;
	int error;

	filp = (struct linux_file *)fp->f_data;
	filp->f_flags = fp->f_flag;

	if (fop->mmap == NULL)
		return (EOPNOTSUPP);

	linux_set_current(td);

	/*
	 * The same VM object might be shared by multiple processes
	 * and the mm_struct is usually freed when a process exits.
	 *
	 * The atomic reference below makes sure the mm_struct is
	 * available as long as the vmap is in the linux_vma_head.
	 */
	task = current;
	mm = task->mm;
	if (atomic_inc_not_zero(&mm->mm_users) == 0)
		return (EINVAL);

	vmap = kzalloc(sizeof(*vmap), GFP_KERNEL);
	vmap->vm_start = 0;
	vmap->vm_end = size;
	vmap->vm_pgoff = *offset / PAGE_SIZE;
	vmap->vm_pfn = 0;
	vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL);
	if (is_shared)
		vmap->vm_flags |= VM_SHARED;
	vmap->vm_ops = NULL;
	vmap->vm_file = get_file(filp);
	vmap->vm_mm = mm;

	if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) {
		error = linux_get_error(task, EINTR);
	} else {
		error = -OPW(fp, td, fop->mmap(filp, vmap));
		error = linux_get_error(task, error);
		up_write(&vmap->vm_mm->mmap_sem);
	}

	if (error != 0) {
		linux_cdev_handle_free(vmap);
		return (error);
	}

	attr = pgprot2cachemode(vmap->vm_page_prot);

	if (vmap->vm_ops != NULL) {
		struct vm_area_struct *ptr;
		void *vm_private_data;
		bool vm_no_fault;

		if (vmap->vm_ops->open == NULL ||
		    vmap->vm_ops->close == NULL ||
		    vmap->vm_private_data == NULL) {
			/* free allocated VM area struct */
			linux_cdev_handle_free(vmap);
			return (EINVAL);
		}

		vm_private_data = vmap->vm_private_data;

		rw_wlock(&linux_vma_lock);
		TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) {
			if (ptr->vm_private_data == vm_private_data)
				break;
		}
		/* check if there is an existing VM area struct */
		if (ptr != NULL) {
			/* check if the VM area structure is invalid */
			if (ptr->vm_ops == NULL ||
			    ptr->vm_ops->open == NULL ||
			    ptr->vm_ops->close == NULL) {
				error = ESTALE;
				vm_no_fault = 1;
			} else {
				error = EEXIST;
				vm_no_fault = (ptr->vm_ops->fault == NULL);
			}
		} else {
			/* insert VM area structure into list */
			TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry);
			error = 0;
			vm_no_fault = (vmap->vm_ops->fault == NULL);
		}
		rw_wunlock(&linux_vma_lock);

		if (error != 0) {
			/* free allocated VM area struct */
			linux_cdev_handle_free(vmap);
			/* check for stale VM area struct */
			if (error != EEXIST)
				return (error);
		}

		/* check if there is no fault handler */
		if (vm_no_fault) {
			*object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE,
			    &linux_cdev_pager_ops[1], size, nprot, *offset,
			    td->td_ucred);
		} else {
			*object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE,
			    &linux_cdev_pager_ops[0], size, nprot, *offset,
			    td->td_ucred);
		}

		/* check if allocating the VM object failed */
		if (*object == NULL) {
			if (error == 0) {
				/* remove VM area struct from list */
				linux_cdev_handle_remove(vmap);
				/* free allocated VM area struct */
				linux_cdev_handle_free(vmap);
			}
			return (EINVAL);
		}
	} else {
		struct sglist *sg;

		sg = sglist_alloc(1, M_WAITOK);
		sglist_append_phys(sg,
		    (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len);

		*object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len,
		    nprot, 0, td->td_ucred);

		linux_cdev_handle_free(vmap);

		if (*object == NULL) {
			sglist_free(sg);
			return (EINVAL);
		}
	}

	if (attr != VM_MEMATTR_DEFAULT) {
		VM_OBJECT_WLOCK(*object);
		vm_object_set_memattr(*object, attr);
		VM_OBJECT_WUNLOCK(*object);
	}
	*offset = 0;
	return (0);
}
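
/*
 * Illustrative sketch (hypothetical driver): the function above
 * accepts two styles of Linux mmap handler.  One that installs vm_ops
 * with open/close (and optionally fault) gets a cdev pager object; one
 * that only sets vm_pfn gets the scatter/gather fallback:
 *
 *	static int
 *	mydrv_mmap(struct linux_file *filp, struct vm_area_struct *vma)
 *	{
 *		vma->vm_private_data = filp->private_data;
 *		vma->vm_ops = &mydrv_vm_ops;	// open/close/fault
 *		return (0);
 *	}
 */
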
struct cdevsw linuxcdevsw = {
	.d_version = D_VERSION,
	.d_fdopen = linux_dev_fdopen,
	.d_name = "lkpidev",
};

static int
linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	ssize_t bytes;
	int error;

	error = 0;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
		return (EINVAL);
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->read != NULL) {
		bytes = OPW(file, td, fop->read(filp,
		    uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset));
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
		} else {
			error = linux_get_error(current, -bytes);
		}
	} else
		error = ENXIO;

	/* update kqfilter status, if any */
	linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ);
	linux_drop_fop(ldev);

	return (error);
}

static int
linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	ssize_t bytes;
	int error;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	/* XXX no support for I/O vectors currently */
	if (uio->uio_iovcnt != 1)
		return (EOPNOTSUPP);
	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
		return (EINVAL);
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->write != NULL) {
		bytes = OPW(file, td, fop->write(filp,
		    uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset));
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
			error = 0;
		} else {
			error = linux_get_error(current, -bytes);
		}
	} else
		error = ENXIO;

	/* update kqfilter status, if any */
	linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE);

	linux_drop_fop(ldev);

	return (error);
}

static int
linux_file_poll(struct file *file, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int revents;

	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);
	if (fop->poll != NULL) {
		revents = OPW(file, td, fop->poll(filp,
		    LINUX_POLL_TABLE_NORMAL)) & events;
	} else {
		revents = 0;
	}
	linux_drop_fop(ldev);
	return (revents);
}

static int
linux_file_close(struct file *file, struct thread *td)
{
	struct linux_file *filp;
	int (*release)(struct inode *, struct linux_file *);
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	int error;

	filp = (struct linux_file *)file->f_data;

	KASSERT(file_count(filp) == 0,
	    ("File refcount(%d) is not zero", file_count(filp)));

	if (td == NULL)
		td = curthread;

	error = 0;
	filp->f_flags = file->f_flag;
	linux_set_current(td);
	linux_poll_wait_dequeue(filp);
	linux_get_fop(filp, &fop, &ldev);
	/*
	 * Always use the real release function, if any, to avoid
	 * leaking device resources:
	 */
	release = filp->f_op->release;
	if (release != NULL)
		error = -OPW(file, td, release(filp->f_vnode, filp));
	funsetown(&filp->f_sigio);
	if (filp->f_vnode != NULL)
		vdrop(filp->f_vnode);
	linux_drop_fop(ldev);
	ldev = filp->f_cdev;
	if (ldev != NULL)
		linux_cdev_deref(ldev);
	linux_synchronize_rcu(RCU_TYPE_REGULAR);
	kfree(filp);

	return (error);
}
not zero", file_count(filp))); 1649 1650 if (td == NULL) 1651 td = curthread; 1652 1653 error = 0; 1654 filp->f_flags = file->f_flag; 1655 linux_set_current(td); 1656 linux_poll_wait_dequeue(filp); 1657 linux_get_fop(filp, &fop, &ldev); 1658 /* 1659 * Always use the real release function, if any, to avoid 1660 * leaking device resources: 1661 */ 1662 release = filp->f_op->release; 1663 if (release != NULL) 1664 error = -OPW(file, td, release(filp->f_vnode, filp)); 1665 funsetown(&filp->f_sigio); 1666 if (filp->f_vnode != NULL) 1667 vdrop(filp->f_vnode); 1668 linux_drop_fop(ldev); 1669 ldev = filp->f_cdev; 1670 if (ldev != NULL) 1671 linux_cdev_deref(ldev); 1672 linux_synchronize_rcu(RCU_TYPE_REGULAR); 1673 kfree(filp); 1674 1675 return (error); 1676 } 1677 1678 static int 1679 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, 1680 struct thread *td) 1681 { 1682 struct linux_file *filp; 1683 const struct file_operations *fop; 1684 struct linux_cdev *ldev; 1685 struct fiodgname_arg *fgn; 1686 const char *p; 1687 int error, i; 1688 1689 error = 0; 1690 filp = (struct linux_file *)fp->f_data; 1691 filp->f_flags = fp->f_flag; 1692 linux_get_fop(filp, &fop, &ldev); 1693 1694 linux_set_current(td); 1695 switch (cmd) { 1696 case FIONBIO: 1697 break; 1698 case FIOASYNC: 1699 if (fop->fasync == NULL) 1700 break; 1701 error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC)); 1702 break; 1703 case FIOSETOWN: 1704 error = fsetown(*(int *)data, &filp->f_sigio); 1705 if (error == 0) { 1706 if (fop->fasync == NULL) 1707 break; 1708 error = -OPW(fp, td, fop->fasync(0, filp, 1709 fp->f_flag & FASYNC)); 1710 } 1711 break; 1712 case FIOGETOWN: 1713 *(int *)data = fgetown(&filp->f_sigio); 1714 break; 1715 case FIODGNAME: 1716 #ifdef COMPAT_FREEBSD32 1717 case FIODGNAME_32: 1718 #endif 1719 if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) { 1720 error = ENXIO; 1721 break; 1722 } 1723 fgn = data; 1724 p = devtoname(filp->f_cdev->cdev); 1725 i = strlen(p) + 1; 1726 if (i > fgn->len) { 1727 error = EINVAL; 1728 break; 1729 } 1730 error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i); 1731 break; 1732 default: 1733 error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td); 1734 break; 1735 } 1736 linux_drop_fop(ldev); 1737 return (error); 1738 } 1739 1740 static int 1741 linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot, 1742 vm_prot_t maxprot, int flags, struct file *fp, 1743 vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp) 1744 { 1745 /* 1746 * Character devices do not provide private mappings 1747 * of any kind: 1748 */ 1749 if ((maxprot & VM_PROT_WRITE) == 0 && 1750 (prot & VM_PROT_WRITE) != 0) 1751 return (EACCES); 1752 if ((flags & (MAP_PRIVATE | MAP_COPY)) != 0) 1753 return (EINVAL); 1754 1755 return (linux_file_mmap_single(fp, fop, foff, objsize, objp, 1756 (int)prot, (flags & MAP_SHARED) ? 
static int
linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff,
    struct thread *td)
{
	struct linux_file *filp;
	const struct file_operations *fop;
	struct linux_cdev *ldev;
	struct mount *mp;
	struct vnode *vp;
	vm_object_t object;
	vm_prot_t maxprot;
	int error;

	filp = (struct linux_file *)fp->f_data;

	vp = filp->f_vnode;
	if (vp == NULL)
		return (EOPNOTSUPP);

	/*
	 * Ensure that file and memory protections are
	 * compatible.
	 */
	mp = vp->v_mount;
	if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) {
		maxprot = VM_PROT_NONE;
		if ((prot & VM_PROT_EXECUTE) != 0)
			return (EACCES);
	} else
		maxprot = VM_PROT_EXECUTE;
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_READ;
	else if ((prot & VM_PROT_READ) != 0)
		return (EACCES);

	/*
	 * If we are sharing potential changes via MAP_SHARED and we
	 * are trying to get write permission although we opened it
	 * without asking for it, bail out.
	 *
	 * Note that most character devices always share mappings.
	 *
	 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE
	 * requests rather than doing it here.
	 */
	if ((flags & MAP_SHARED) != 0) {
		if ((fp->f_flag & FWRITE) != 0)
			maxprot |= VM_PROT_WRITE;
		else if ((prot & VM_PROT_WRITE) != 0)
			return (EACCES);
	}
	maxprot &= cap_maxprot;

	linux_get_fop(filp, &fop, &ldev);
	error = linux_file_mmap_sub(td, size, prot, maxprot, flags, fp,
	    &foff, fop, &object);
	if (error != 0)
		goto out;

	error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
	    foff, FALSE, td);
	if (error != 0)
		vm_object_deallocate(object);
out:
	linux_drop_fop(ldev);
	return (error);
}

static int
linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred)
{
	struct linux_file *filp;
	struct vnode *vp;
	int error;

	filp = (struct linux_file *)fp->f_data;
	if (filp->f_vnode == NULL)
		return (EOPNOTSUPP);

	vp = filp->f_vnode;

	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_STAT(vp, sb, curthread->td_ucred, NOCRED);
	VOP_UNLOCK(vp);

	return (error);
}

static int
linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp)
{
	struct linux_file *filp;
	struct vnode *vp;
	int error;

	filp = fp->f_data;
	vp = filp->f_vnode;
	if (vp == NULL) {
		error = 0;
		kif->kf_type = KF_TYPE_DEV;
	} else {
		vref(vp);
		FILEDESC_SUNLOCK(fdp);
		error = vn_fill_kinfo_vnode(vp, kif);
		vrele(vp);
		kif->kf_type = KF_TYPE_VNODE;
		FILEDESC_SLOCK(fdp);
	}
	return (error);
}

unsigned int
linux_iminor(struct inode *inode)
{
	struct linux_cdev *ldev;

	if (inode == NULL || inode->v_rdev == NULL ||
	    inode->v_rdev->si_devsw != &linuxcdevsw)
		return (-1U);
	ldev = inode->v_rdev->si_drv1;
	if (ldev == NULL)
		return (-1U);

	return (minor(ldev->dev));
}

struct fileops linuxfileops = {
	.fo_read = linux_file_read,
	.fo_write = linux_file_write,
	.fo_truncate = invfo_truncate,
	.fo_kqfilter = linux_file_kqfilter,
	.fo_stat = linux_file_stat,
	.fo_fill_kinfo = linux_file_fill_kinfo,
	.fo_poll = linux_file_poll,
	.fo_close = linux_file_close,
	.fo_ioctl = linux_file_ioctl,
	.fo_mmap = linux_file_mmap,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
	.fo_flags = DFLAG_PASSABLE,
};
/*
 * Hash of vmmap addresses.  This is infrequently accessed and does not
 * need to be particularly large.  This is done because we must store the
 * caller's idea of the map size to properly unmap.
 */
struct vmmap {
	LIST_ENTRY(vmmap)	vm_next;
	void			*vm_addr;
	unsigned long		vm_size;
};

struct vmmaphd {
	struct vmmap *lh_first;
};
#define	VMMAP_HASH_SIZE	64
#define	VMMAP_HASH_MASK	(VMMAP_HASH_SIZE - 1)
#define	VM_HASH(addr)	((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
static struct mtx vmmaplock;

static void
vmmap_add(void *addr, unsigned long size)
{
	struct vmmap *vmmap;

	vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
	mtx_lock(&vmmaplock);
	vmmap->vm_size = size;
	vmmap->vm_addr = addr;
	LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
	mtx_unlock(&vmmaplock);
}

static struct vmmap *
vmmap_remove(void *addr)
{
	struct vmmap *vmmap;

	mtx_lock(&vmmaplock);
	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
		if (vmmap->vm_addr == addr)
			break;
	if (vmmap)
		LIST_REMOVE(vmmap, vm_next);
	mtx_unlock(&vmmaplock);

	return (vmmap);
}

#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
void *
_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
{
	void *addr;

	addr = pmap_mapdev_attr(phys_addr, size, attr);
	if (addr == NULL)
		return (NULL);
	vmmap_add(addr, size);

	return (addr);
}
#endif

void
iounmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
	pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size);
#endif
	kfree(vmmap);
}

void *
vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
{
	vm_offset_t off;
	size_t size;

	size = count * PAGE_SIZE;
	off = kva_alloc(size);
	if (off == 0)
		return (NULL);
	vmmap_add((void *)off, size);
	pmap_qenter(off, pages, count);

	return ((void *)off);
}

void
vunmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
	pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
	kva_free((vm_offset_t)addr, vmmap->vm_size);
	kfree(vmmap);
}
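
/*
 * Illustrative sketch: the vmmap hash above lets the single-argument
 * Linux unmap calls recover the size the mapping call was given
 * (hypothetical driver code; phys/size values are made up):
 *
 *	void *regs = ioremap(phys_addr, 0x1000);
 *	...
 *	iounmap(regs);	// size 0x1000 is looked up in the hash
 */
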
static char *
devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	if (dev != NULL)
		p = devm_kmalloc(dev, len + 1, gfp);
	else
		p = kmalloc(len + 1, gfp);
	if (p != NULL)
		vsnprintf(p, len + 1, fmt, ap);

	return (p);
}

char *
kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
{

	return (devm_kvasprintf(NULL, gfp, fmt, ap));
}

char *
lkpi_devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = devm_kvasprintf(dev, gfp, fmt, ap);
	va_end(ap);

	return (p);
}

char *
kasprintf(gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = kvasprintf(gfp, fmt, ap);
	va_end(ap);

	return (p);
}

static void
linux_timer_callback_wrapper(void *context)
{
	struct timer_list *timer;

	timer = context;

	if (linux_set_current_flags(curthread, M_NOWAIT)) {
		/* try again later */
		callout_reset(&timer->callout, 1,
		    &linux_timer_callback_wrapper, timer);
		return;
	}

	timer->function(timer->data);
}

int
mod_timer(struct timer_list *timer, int expires)
{
	int ret;

	timer->expires = expires;
	ret = callout_reset(&timer->callout,
	    linux_timer_jiffies_until(expires),
	    &linux_timer_callback_wrapper, timer);

	MPASS(ret == 0 || ret == 1);

	return (ret == 1);
}

void
add_timer(struct timer_list *timer)
{

	callout_reset(&timer->callout,
	    linux_timer_jiffies_until(timer->expires),
	    &linux_timer_callback_wrapper, timer);
}

void
add_timer_on(struct timer_list *timer, int cpu)
{

	callout_reset_on(&timer->callout,
	    linux_timer_jiffies_until(timer->expires),
	    &linux_timer_callback_wrapper, timer, cpu);
}

int
del_timer(struct timer_list *timer)
{

	if (callout_stop(&(timer)->callout) == -1)
		return (0);
	return (1);
}

int
del_timer_sync(struct timer_list *timer)
{

	if (callout_drain(&(timer)->callout) == -1)
		return (0);
	return (1);
}

/* greatest common divisor, Euclid's algorithm */
static uint64_t
lkpi_gcd_64(uint64_t a, uint64_t b)
{
	uint64_t an;
	uint64_t bn;

	while (b != 0) {
		an = b;
		bn = a % b;
		a = an;
		b = bn;
	}
	return (a);
}

uint64_t lkpi_nsec2hz_rem;
uint64_t lkpi_nsec2hz_div = 1000000000ULL;
uint64_t lkpi_nsec2hz_max;

uint64_t lkpi_usec2hz_rem;
uint64_t lkpi_usec2hz_div = 1000000ULL;
uint64_t lkpi_usec2hz_max;

uint64_t lkpi_msec2hz_rem;
uint64_t lkpi_msec2hz_div = 1000ULL;
uint64_t lkpi_msec2hz_max;

static void
linux_timer_init(void *arg)
{
	uint64_t gcd;

	/*
	 * Compute an internal HZ value which can divide 2**32 to
	 * avoid timer rounding problems when the tick value wraps
	 * around 2**32:
	 */
	linux_timer_hz_mask = 1;
	while (linux_timer_hz_mask < (unsigned long)hz)
		linux_timer_hz_mask *= 2;
	linux_timer_hz_mask--;

	/* compute some internal constants */

	lkpi_nsec2hz_rem = hz;
	lkpi_usec2hz_rem = hz;
	lkpi_msec2hz_rem = hz;

	gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div);
	lkpi_nsec2hz_rem /= gcd;
	lkpi_nsec2hz_div /= gcd;
	lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem;

	gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div);
	lkpi_usec2hz_rem /= gcd;
	lkpi_usec2hz_div /= gcd;
	lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem;

	gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div);
	lkpi_msec2hz_rem /= gcd;
	lkpi_msec2hz_div /= gcd;
	lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem;
}
SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);
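
/*
 * Worked example of the constants computed above, assuming hz = 1000:
 * gcd(1000, 1000000000) = 1000, so lkpi_nsec2hz_rem = 1 and
 * lkpi_nsec2hz_div = 1000000.  Converting N nanoseconds to ticks is
 * then (N * rem) / div = N / 1000000, and lkpi_nsec2hz_max = -1ULL /
 * rem bounds N so that the multiplication cannot overflow.
 */
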
static void
linux_timer_init(void *arg)
{
	uint64_t gcd;

	/*
	 * Compute an internal HZ value which can divide 2**32 to
	 * avoid timer rounding problems when the tick value wraps
	 * around 2**32:
	 */
	linux_timer_hz_mask = 1;
	while (linux_timer_hz_mask < (unsigned long)hz)
		linux_timer_hz_mask *= 2;
	linux_timer_hz_mask--;

	/* compute some internal constants */

	lkpi_nsec2hz_rem = hz;
	lkpi_usec2hz_rem = hz;
	lkpi_msec2hz_rem = hz;

	gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div);
	lkpi_nsec2hz_rem /= gcd;
	lkpi_nsec2hz_div /= gcd;
	lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem;

	gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div);
	lkpi_usec2hz_rem /= gcd;
	lkpi_usec2hz_div /= gcd;
	lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem;

	gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div);
	lkpi_msec2hz_rem /= gcd;
	lkpi_msec2hz_div /= gcd;
	lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem;
}
SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);

void
linux_complete_common(struct completion *c, int all)
{
	int wakeup_swapper;

	sleepq_lock(c);
	if (all) {
		c->done = UINT_MAX;
		wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
	} else {
		if (c->done != UINT_MAX)
			c->done++;
		wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
	}
	sleepq_release(c);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Indefinite wait for done != 0 with or without signals.
 */
int
linux_wait_for_common(struct completion *c, int flags)
{
	struct task_struct *task;
	int error;

	if (SCHEDULER_STOPPED())
		return (0);

	task = current;

	if (flags != 0)
		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
	else
		flags = SLEEPQ_SLEEP;
	error = 0;
	for (;;) {
		sleepq_lock(c);
		if (c->done)
			break;
		sleepq_add(c, NULL, "completion", flags, 0);
		if (flags & SLEEPQ_INTERRUPTIBLE) {
			DROP_GIANT();
			error = -sleepq_wait_sig(c, 0);
			PICKUP_GIANT();
			if (error != 0) {
				/*
				 * sleepq_wait_sig() has already dropped
				 * the sleepqueue lock, so do not release
				 * it again on this path.
				 */
				linux_schedule_save_interrupt_value(task,
				    error);
				error = -ERESTARTSYS;
				goto intr;
			}
		} else {
			DROP_GIANT();
			sleepq_wait(c, 0);
			PICKUP_GIANT();
		}
	}
	if (c->done != UINT_MAX)
		c->done--;
	sleepq_release(c);

intr:
	return (error);
}

/*
 * Time limited wait for done != 0 with or without signals.
 */
int
linux_wait_for_timeout_common(struct completion *c, int timeout, int flags)
{
	struct task_struct *task;
	int end = jiffies + timeout;
	int error;

	if (SCHEDULER_STOPPED())
		return (0);

	task = current;

	if (flags != 0)
		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
	else
		flags = SLEEPQ_SLEEP;

	for (;;) {
		sleepq_lock(c);
		if (c->done)
			break;
		sleepq_add(c, NULL, "completion", flags, 0);
		sleepq_set_timeout(c, linux_timer_jiffies_until(end));

		DROP_GIANT();
		if (flags & SLEEPQ_INTERRUPTIBLE)
			error = -sleepq_timedwait_sig(c, 0);
		else
			error = -sleepq_timedwait(c, 0);
		PICKUP_GIANT();

		if (error != 0) {
			/* check for timeout */
			if (error == -EWOULDBLOCK) {
				error = 0;	/* timeout */
			} else {
				/* signal happened */
				linux_schedule_save_interrupt_value(task,
				    error);
				error = -ERESTARTSYS;
			}
			goto done;
		}
	}
	if (c->done != UINT_MAX)
		c->done--;
	sleepq_release(c);

	/* return how many jiffies are left */
	error = linux_timer_jiffies_until(end);
done:
	return (error);
}

int
linux_try_wait_for_completion(struct completion *c)
{
	int isdone;

	sleepq_lock(c);
	isdone = (c->done != 0);
	if (c->done != 0 && c->done != UINT_MAX)
		c->done--;
	sleepq_release(c);
	return (isdone);
}

int
linux_completion_done(struct completion *c)
{
	int isdone;

	sleepq_lock(c);
	isdone = (c->done != 0);
	sleepq_release(c);
	return (isdone);
}
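/*
 * Illustrative producer/consumer use of the completion code above
 * (a sketch, not compiled as part of this file):
 *
 *	struct completion done;
 *
 *	init_completion(&done);
 *	// waiter:
 *	if (wait_for_completion_interruptible(&done) != 0)
 *		return;			// interrupted by a signal
 *	// signaller:
 *	complete(&done);		// wakes a single waiter
 *	complete_all(&done);		// sets done = UINT_MAX, wakes all
 */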
static void
linux_cdev_deref(struct linux_cdev *ldev)
{
	/* Only dynamically allocated cdevs are freed on last release. */
	if (refcount_release(&ldev->refs) &&
	    ldev->kobj.ktype == &linux_cdev_ktype)
		kfree(ldev);
}

static void
linux_cdev_release(struct kobject *kobj)
{
	struct linux_cdev *cdev;
	struct kobject *parent;

	cdev = container_of(kobj, struct linux_cdev, kobj);
	parent = kobj->parent;
	linux_destroy_dev(cdev);
	linux_cdev_deref(cdev);
	kobject_put(parent);
}

static void
linux_cdev_static_release(struct kobject *kobj)
{
	struct cdev *cdev;
	struct linux_cdev *ldev;

	ldev = container_of(kobj, struct linux_cdev, kobj);
	cdev = ldev->cdev;
	if (cdev != NULL) {
		destroy_dev(cdev);
		ldev->cdev = NULL;
	}
	kobject_put(kobj->parent);
}

int
linux_cdev_device_add(struct linux_cdev *ldev, struct device *dev)
{
	int ret;

	if (dev->devt != 0) {
		/* Set parent kernel object. */
		ldev->kobj.parent = &dev->kobj;

		/*
		 * Unlike Linux, we require the kobject of the
		 * character device structure to have a valid name
		 * before calling this function:
		 */
		if (ldev->kobj.name == NULL)
			return (-EINVAL);

		ret = cdev_add(ldev, dev->devt, 1);
		if (ret)
			return (ret);
	}
	ret = device_add(dev);
	if (ret != 0 && dev->devt != 0)
		cdev_del(ldev);
	return (ret);
}

void
linux_cdev_device_del(struct linux_cdev *ldev, struct device *dev)
{
	device_del(dev);

	if (dev->devt != 0)
		cdev_del(ldev);
}

static void
linux_destroy_dev(struct linux_cdev *ldev)
{

	if (ldev->cdev == NULL)
		return;

	MPASS((ldev->siref & LDEV_SI_DTR) == 0);
	MPASS(ldev->kobj.ktype == &linux_cdev_ktype);

	/*
	 * Mark the device as being destroyed, then wait for all
	 * remaining si references to drain before tearing it down.
	 */
	atomic_set_int(&ldev->siref, LDEV_SI_DTR);
	while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0)
		pause("ldevdtr", hz / 4);

	destroy_dev(ldev->cdev);
	ldev->cdev = NULL;
}

const struct kobj_type linux_cdev_ktype = {
	.release = linux_cdev_release,
};

const struct kobj_type linux_cdev_static_ktype = {
	.release = linux_cdev_static_release,
};
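/*
 * Sketch of the expected call pattern for the helpers above ("ldev",
 * "dev" and "unit" are hypothetical driver state).  The kobject name
 * must be set before linux_cdev_device_add() is called:
 *
 *	kobject_set_name(&ldev->kobj, "mydev%d", unit);
 *	error = linux_cdev_device_add(ldev, dev);
 *	if (error != 0)
 *		return (error);
 *	// ...
 *	linux_cdev_device_del(ldev, dev);	// undoes cdev_add + device_add
 */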
static void
linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	if (linkstate == LINK_STATE_UP)
		nb->notifier_call(nb, NETDEV_UP, &ni);
	else
		nb->notifier_call(nb, NETDEV_DOWN, &ni);
}

static void
linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_REGISTER, &ni);
}

static void
linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_UNREGISTER, &ni);
}

static void
linux_handle_iflladdr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_CHANGEADDR, &ni);
}

static void
linux_handle_ifaddr_event(void *arg, struct ifnet *ifp)
{
	struct notifier_block *nb;
	struct netdev_notifier_info ni;

	nb = arg;
	ni.ifp = ifp;
	ni.dev = (struct net_device *)ifp;
	nb->notifier_call(nb, NETDEV_CHANGEIFADDR, &ni);
}

int
register_netdevice_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER(
	    ifnet_link_event, linux_handle_ifnet_link_event, nb, 0);
	nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0);
	nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER(
	    ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0);
	nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER(
	    iflladdr_event, linux_handle_iflladdr_event, nb, 0);

	return (0);
}

int
register_inetaddr_notifier(struct notifier_block *nb)
{

	nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER(
	    ifaddr_event, linux_handle_ifaddr_event, nb, 0);
	return (0);
}

int
unregister_netdevice_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifnet_link_event,
	    nb->tags[NETDEV_UP]);
	EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
	    nb->tags[NETDEV_REGISTER]);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
	    nb->tags[NETDEV_UNREGISTER]);
	EVENTHANDLER_DEREGISTER(iflladdr_event,
	    nb->tags[NETDEV_CHANGEADDR]);

	return (0);
}

int
unregister_inetaddr_notifier(struct notifier_block *nb)
{

	EVENTHANDLER_DEREGISTER(ifaddr_event,
	    nb->tags[NETDEV_CHANGEIFADDR]);

	return (0);
}

struct list_sort_thunk {
	int (*cmp)(void *, struct list_head *, struct list_head *);
	void *priv;
};

static inline int
linux_le_cmp(void *priv, const void *d1, const void *d2)
{
	struct list_head *le1, *le2;
	struct list_sort_thunk *thunk;

	thunk = priv;
	le1 = *(__DECONST(struct list_head **, d1));
	le2 = *(__DECONST(struct list_head **, d2));
	return ((thunk->cmp)(thunk->priv, le1, le2));
}

void
list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
    struct list_head *a, struct list_head *b))
{
	struct list_sort_thunk thunk;
	struct list_head **ar, *le;
	size_t count, i;

	/* Flatten the list into an array, sort it, and relink. */
	count = 0;
	list_for_each(le, head)
		count++;
	ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
	i = 0;
	list_for_each(le, head)
		ar[i++] = le;
	thunk.cmp = cmp;
	thunk.priv = priv;
	qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp);
	INIT_LIST_HEAD(head);
	for (i = 0; i < count; i++)
		list_add_tail(ar[i], head);
	free(ar, M_KMALLOC);
}
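/*
 * Example use of list_sort() ("struct item" is made up for
 * illustration).  Because the helper flattens the list into a
 * temporary array allocated with M_WAITOK, it must not be called
 * from contexts that cannot sleep:
 *
 *	struct item { struct list_head entry; int key; };
 *
 *	static int
 *	item_cmp(void *priv, struct list_head *a, struct list_head *b)
 *	{
 *		struct item *ia = container_of(a, struct item, entry);
 *		struct item *ib = container_of(b, struct item, entry);
 *		return (ia->key - ib->key);
 *	}
 *
 *	list_sort(NULL, &head, item_cmp);
 */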
#if defined(__i386__) || defined(__amd64__)
int
linux_wbinvd_on_all_cpus(void)
{

	pmap_invalidate_cache();
	return (0);
}
#endif

int
linux_on_each_cpu(void callback(void *), void *data)
{

	smp_rendezvous(smp_no_rendezvous_barrier, callback,
	    smp_no_rendezvous_barrier, data);
	return (0);
}

int
linux_in_atomic(void)
{

	return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
}

struct linux_cdev *
linux_find_cdev(const char *name, unsigned major, unsigned minor)
{
	dev_t dev = MKDEV(major, minor);
	struct cdev *cdev;

	dev_lock();
	LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
		struct linux_cdev *ldev = cdev->si_drv1;
		if (ldev->dev == dev &&
		    strcmp(kobject_name(&ldev->kobj), name) == 0) {
			break;
		}
	}
	dev_unlock();

	return (cdev != NULL ? cdev->si_drv1 : NULL);
}

int
__register_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add(cdev, makedev(major, i), 1);
		if (ret != 0)
			break;
	}
	return (ret);
}

int
__register_chrdev_p(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops, uid_t uid,
    gid_t gid, int mode)
{
	struct linux_cdev *cdev;
	int ret = 0;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdev = cdev_alloc();
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
		if (ret != 0)
			break;
	}
	return (ret);
}

void
__unregister_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name)
{
	struct linux_cdev *cdevp;
	int i;

	for (i = baseminor; i < baseminor + count; i++) {
		cdevp = linux_find_cdev(name, major, i);
		if (cdevp != NULL)
			cdev_del(cdevp);
	}
}
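/*
 * Typical register/unregister pairing for the chrdev shims above
 * ("MY_MAJOR" and "my_fops" are hypothetical):
 *
 *	error = __register_chrdev(MY_MAJOR, 0, 4, "mydrv", &my_fops);
 *	if (error != 0)
 *		return (error);
 *	// ...
 *	__unregister_chrdev(MY_MAJOR, 0, 4, "mydrv");
 *
 * Note that __register_chrdev() does not remove already-created
 * devices when it fails part way through the range, so callers will
 * want to call __unregister_chrdev() on the error path as well.
 */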
void
linux_dump_stack(void)
{
#ifdef STACK
	struct stack st;

	stack_save(&st);
	stack_print(&st);
#endif
}

int
linuxkpi_net_ratelimit(void)
{

	return (ppsratecheck(&lkpi_net_lastlog, &lkpi_net_curpps,
	    lkpi_net_maxpps));
}

#if defined(__i386__) || defined(__amd64__)
bool linux_cpu_has_clflush;
#endif

static void
linux_compat_init(void *arg)
{
	struct sysctl_oid *rootoid;
	int i;

#if defined(__i386__) || defined(__amd64__)
	linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH);
#endif
	rw_init(&linux_vma_lock, "lkpi-vma-lock");

	rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
	    OID_AUTO, "sys", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "sys");
	kobject_init(&linux_class_root, &linux_class_ktype);
	kobject_set_name(&linux_class_root, "class");
	linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
	    OID_AUTO, "class", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "class");
	kobject_init(&linux_root_device.kobj, &linux_dev_ktype);
	kobject_set_name(&linux_root_device.kobj, "device");
	linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL,
	    SYSCTL_CHILDREN(rootoid), OID_AUTO, "device",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device");
	linux_root_device.bsddev = root_bus;
	linux_class_misc.name = "misc";
	class_register(&linux_class_misc);
	INIT_LIST_HEAD(&pci_drivers);
	INIT_LIST_HEAD(&pci_devices);
	spin_lock_init(&pci_lock);
	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
	for (i = 0; i < VMMAP_HASH_SIZE; i++)
		LIST_INIT(&vmmaphead[i]);
	init_waitqueue_head(&linux_bit_waitq);
	init_waitqueue_head(&linux_var_waitq);

	CPU_COPY(&all_cpus, &cpu_online_mask);
}
SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);

static void
linux_compat_uninit(void *arg)
{
	linux_kobject_kfree_name(&linux_class_root);
	linux_kobject_kfree_name(&linux_root_device.kobj);
	linux_kobject_kfree_name(&linux_class_misc.kobj);

	mtx_destroy(&vmmaplock);
	spin_lock_destroy(&pci_lock);
	rw_destroy(&linux_vma_lock);
}
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);

/*
 * NOTE: Linux frequently uses "unsigned long" for pointer to integer
 * conversion and vice versa, where FreeBSD would use "uintptr_t".
 * Assert that these types have the same size, else some parts of the
 * LinuxKPI may not work as expected:
 */
CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));
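/*
 * For example, the pointer/integer round trip that Linux drivers
 * commonly rely on is only lossless because of the assertion above:
 *
 *	unsigned long cookie = (unsigned long)ptr;	// no truncation
 *	void *back = (void *)cookie;			// equals ptr
 */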