1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * The input core 4 * 5 * Copyright (c) 1999-2002 Vojtech Pavlik 6 */ 7 8 9 #define pr_fmt(fmt) KBUILD_BASENAME ": " fmt 10 11 #include <linux/init.h> 12 #include <linux/types.h> 13 #include <linux/idr.h> 14 #include <linux/input/mt.h> 15 #include <linux/module.h> 16 #include <linux/slab.h> 17 #include <linux/random.h> 18 #include <linux/major.h> 19 #include <linux/proc_fs.h> 20 #include <linux/sched.h> 21 #include <linux/seq_file.h> 22 #include <linux/pm.h> 23 #include <linux/poll.h> 24 #include <linux/device.h> 25 #include <linux/kstrtox.h> 26 #include <linux/mutex.h> 27 #include <linux/rcupdate.h> 28 #include "input-compat.h" 29 #include "input-core-private.h" 30 #include "input-poller.h" 31 32 MODULE_AUTHOR("Vojtech Pavlik <[email protected]>"); 33 MODULE_DESCRIPTION("Input core"); 34 MODULE_LICENSE("GPL"); 35 36 #define INPUT_MAX_CHAR_DEVICES 1024 37 #define INPUT_FIRST_DYNAMIC_DEV 256 38 static DEFINE_IDA(input_ida); 39 40 static LIST_HEAD(input_dev_list); 41 static LIST_HEAD(input_handler_list); 42 43 /* 44 * input_mutex protects access to both input_dev_list and input_handler_list. 45 * This also causes input_[un]register_device and input_[un]register_handler 46 * be mutually exclusive which simplifies locking in drivers implementing 47 * input handlers. 48 */ 49 static DEFINE_MUTEX(input_mutex); 50 51 static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 }; 52 53 static const unsigned int input_max_code[EV_CNT] = { 54 [EV_KEY] = KEY_MAX, 55 [EV_REL] = REL_MAX, 56 [EV_ABS] = ABS_MAX, 57 [EV_MSC] = MSC_MAX, 58 [EV_SW] = SW_MAX, 59 [EV_LED] = LED_MAX, 60 [EV_SND] = SND_MAX, 61 [EV_FF] = FF_MAX, 62 }; 63 64 static inline int is_event_supported(unsigned int code, 65 unsigned long *bm, unsigned int max) 66 { 67 return code <= max && test_bit(code, bm); 68 } 69 70 static int input_defuzz_abs_event(int value, int old_val, int fuzz) 71 { 72 if (fuzz) { 73 if (value > old_val - fuzz / 2 && value < old_val + fuzz / 2) 74 return old_val; 75 76 if (value > old_val - fuzz && value < old_val + fuzz) 77 return (old_val * 3 + value) / 4; 78 79 if (value > old_val - fuzz * 2 && value < old_val + fuzz * 2) 80 return (old_val + value) / 2; 81 } 82 83 return value; 84 } 85 86 static void input_start_autorepeat(struct input_dev *dev, int code) 87 { 88 if (test_bit(EV_REP, dev->evbit) && 89 dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] && 90 dev->timer.function) { 91 dev->repeat_key = code; 92 mod_timer(&dev->timer, 93 jiffies + msecs_to_jiffies(dev->rep[REP_DELAY])); 94 } 95 } 96 97 static void input_stop_autorepeat(struct input_dev *dev) 98 { 99 del_timer(&dev->timer); 100 } 101 102 /* 103 * Pass values first through all filters and then, if event has not been 104 * filtered out, through all open handles. This order is achieved by placing 105 * filters at the head of the list of handles attached to the device, and 106 * placing regular handles at the tail of the list. 107 * 108 * This function is called with dev->event_lock held and interrupts disabled. 
109 */ 110 static void input_pass_values(struct input_dev *dev, 111 struct input_value *vals, unsigned int count) 112 { 113 struct input_handle *handle; 114 struct input_value *v; 115 116 lockdep_assert_held(&dev->event_lock); 117 118 if (!count) 119 return; 120 121 rcu_read_lock(); 122 123 handle = rcu_dereference(dev->grab); 124 if (handle) { 125 count = handle->handler->events(handle, vals, count); 126 } else { 127 list_for_each_entry_rcu(handle, &dev->h_list, d_node) 128 if (handle->open) { 129 count = handle->handler->events(handle, vals, 130 count); 131 if (!count) 132 break; 133 } 134 } 135 136 rcu_read_unlock(); 137 138 /* trigger auto repeat for key events */ 139 if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) { 140 for (v = vals; v != vals + count; v++) { 141 if (v->type == EV_KEY && v->value != 2) { 142 if (v->value) 143 input_start_autorepeat(dev, v->code); 144 else 145 input_stop_autorepeat(dev); 146 } 147 } 148 } 149 } 150 151 #define INPUT_IGNORE_EVENT 0 152 #define INPUT_PASS_TO_HANDLERS 1 153 #define INPUT_PASS_TO_DEVICE 2 154 #define INPUT_SLOT 4 155 #define INPUT_FLUSH 8 156 #define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE) 157 158 static int input_handle_abs_event(struct input_dev *dev, 159 unsigned int code, int *pval) 160 { 161 struct input_mt *mt = dev->mt; 162 bool is_new_slot = false; 163 bool is_mt_event; 164 int *pold; 165 166 if (code == ABS_MT_SLOT) { 167 /* 168 * "Stage" the event; we'll flush it later, when we 169 * get actual touch data. 170 */ 171 if (mt && *pval >= 0 && *pval < mt->num_slots) 172 mt->slot = *pval; 173 174 return INPUT_IGNORE_EVENT; 175 } 176 177 is_mt_event = input_is_mt_value(code); 178 179 if (!is_mt_event) { 180 pold = &dev->absinfo[code].value; 181 } else if (mt) { 182 pold = &mt->slots[mt->slot].abs[code - ABS_MT_FIRST]; 183 is_new_slot = mt->slot != dev->absinfo[ABS_MT_SLOT].value; 184 } else { 185 /* 186 * Bypass filtering for multi-touch events when 187 * not employing slots. 
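		 *
		 * (Illustrative note, not part of the original comment:
		 * slotless drivers report each contact as raw ABS_MT_*
		 * values followed by input_mt_sync(), for example
		 *
		 *	input_report_abs(dev, ABS_MT_POSITION_X, x);
		 *	input_report_abs(dev, ABS_MT_POSITION_Y, y);
		 *	input_mt_sync(dev);
		 *
		 * so there is no stored per-contact state to defuzz
		 * against.)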
188 */ 189 pold = NULL; 190 } 191 192 if (pold) { 193 *pval = input_defuzz_abs_event(*pval, *pold, 194 dev->absinfo[code].fuzz); 195 if (*pold == *pval) 196 return INPUT_IGNORE_EVENT; 197 198 *pold = *pval; 199 } 200 201 /* Flush pending "slot" event */ 202 if (is_new_slot) { 203 dev->absinfo[ABS_MT_SLOT].value = mt->slot; 204 return INPUT_PASS_TO_HANDLERS | INPUT_SLOT; 205 } 206 207 return INPUT_PASS_TO_HANDLERS; 208 } 209 210 static int input_get_disposition(struct input_dev *dev, 211 unsigned int type, unsigned int code, int *pval) 212 { 213 int disposition = INPUT_IGNORE_EVENT; 214 int value = *pval; 215 216 /* filter-out events from inhibited devices */ 217 if (dev->inhibited) 218 return INPUT_IGNORE_EVENT; 219 220 switch (type) { 221 222 case EV_SYN: 223 switch (code) { 224 case SYN_CONFIG: 225 disposition = INPUT_PASS_TO_ALL; 226 break; 227 228 case SYN_REPORT: 229 disposition = INPUT_PASS_TO_HANDLERS | INPUT_FLUSH; 230 break; 231 case SYN_MT_REPORT: 232 disposition = INPUT_PASS_TO_HANDLERS; 233 break; 234 } 235 break; 236 237 case EV_KEY: 238 if (is_event_supported(code, dev->keybit, KEY_MAX)) { 239 240 /* auto-repeat bypasses state updates */ 241 if (value == 2) { 242 disposition = INPUT_PASS_TO_HANDLERS; 243 break; 244 } 245 246 if (!!test_bit(code, dev->key) != !!value) { 247 248 __change_bit(code, dev->key); 249 disposition = INPUT_PASS_TO_HANDLERS; 250 } 251 } 252 break; 253 254 case EV_SW: 255 if (is_event_supported(code, dev->swbit, SW_MAX) && 256 !!test_bit(code, dev->sw) != !!value) { 257 258 __change_bit(code, dev->sw); 259 disposition = INPUT_PASS_TO_HANDLERS; 260 } 261 break; 262 263 case EV_ABS: 264 if (is_event_supported(code, dev->absbit, ABS_MAX)) 265 disposition = input_handle_abs_event(dev, code, &value); 266 267 break; 268 269 case EV_REL: 270 if (is_event_supported(code, dev->relbit, REL_MAX) && value) 271 disposition = INPUT_PASS_TO_HANDLERS; 272 273 break; 274 275 case EV_MSC: 276 if (is_event_supported(code, dev->mscbit, MSC_MAX)) 277 disposition = INPUT_PASS_TO_ALL; 278 279 break; 280 281 case EV_LED: 282 if (is_event_supported(code, dev->ledbit, LED_MAX) && 283 !!test_bit(code, dev->led) != !!value) { 284 285 __change_bit(code, dev->led); 286 disposition = INPUT_PASS_TO_ALL; 287 } 288 break; 289 290 case EV_SND: 291 if (is_event_supported(code, dev->sndbit, SND_MAX)) { 292 293 if (!!test_bit(code, dev->snd) != !!value) 294 __change_bit(code, dev->snd); 295 disposition = INPUT_PASS_TO_ALL; 296 } 297 break; 298 299 case EV_REP: 300 if (code <= REP_MAX && value >= 0 && dev->rep[code] != value) { 301 dev->rep[code] = value; 302 disposition = INPUT_PASS_TO_ALL; 303 } 304 break; 305 306 case EV_FF: 307 if (value >= 0) 308 disposition = INPUT_PASS_TO_ALL; 309 break; 310 311 case EV_PWR: 312 disposition = INPUT_PASS_TO_ALL; 313 break; 314 } 315 316 *pval = value; 317 return disposition; 318 } 319 320 static void input_event_dispose(struct input_dev *dev, int disposition, 321 unsigned int type, unsigned int code, int value) 322 { 323 if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event) 324 dev->event(dev, type, code, value); 325 326 if (!dev->vals) 327 return; 328 329 if (disposition & INPUT_PASS_TO_HANDLERS) { 330 struct input_value *v; 331 332 if (disposition & INPUT_SLOT) { 333 v = &dev->vals[dev->num_vals++]; 334 v->type = EV_ABS; 335 v->code = ABS_MT_SLOT; 336 v->value = dev->mt->slot; 337 } 338 339 v = &dev->vals[dev->num_vals++]; 340 v->type = type; 341 v->code = code; 342 v->value = value; 343 } 344 345 if (disposition & INPUT_FLUSH) { 346 if 
(dev->num_vals >= 2)
			input_pass_values(dev, dev->vals, dev->num_vals);
		dev->num_vals = 0;
		/*
		 * Reset the timestamp on flush so we won't end up
		 * with a stale one. Note we only need to reset the
		 * monolithic one as we use its presence when deciding
		 * whether to generate a synthetic timestamp.
		 */
		dev->timestamp[INPUT_CLK_MONO] = ktime_set(0, 0);
	} else if (dev->num_vals >= dev->max_vals - 2) {
		dev->vals[dev->num_vals++] = input_value_sync;
		input_pass_values(dev, dev->vals, dev->num_vals);
		dev->num_vals = 0;
	}
}

void input_handle_event(struct input_dev *dev,
			unsigned int type, unsigned int code, int value)
{
	int disposition;

	lockdep_assert_held(&dev->event_lock);

	disposition = input_get_disposition(dev, type, code, &value);
	if (disposition != INPUT_IGNORE_EVENT) {
		if (type != EV_SYN)
			add_input_randomness(type, code, value);

		input_event_dispose(dev, disposition, type, code, value);
	}
}

/**
 * input_event() - report new input event
 * @dev: device that generated the event
 * @type: type of the event
 * @code: event code
 * @value: value of the event
 *
 * This function should be used by drivers implementing various input
 * devices to report input events. See also input_inject_event().
 *
 * NOTE: input_event() may be safely used right after an input device has
 * been allocated with input_allocate_device(), even before it is registered
 * with input_register_device(), but the event will not reach any of the
 * input handlers. Such early invocation of input_event() may be used to
 * 'seed' the initial state of a switch or the initial position of an
 * absolute axis, etc.
 */
void input_event(struct input_dev *dev,
		 unsigned int type, unsigned int code, int value)
{
	unsigned long flags;

	if (is_event_supported(type, dev->evbit, EV_MAX)) {

		spin_lock_irqsave(&dev->event_lock, flags);
		input_handle_event(dev, type, code, value);
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}
EXPORT_SYMBOL(input_event);

/**
 * input_inject_event() - send input event from input handler
 * @handle: input handle to send event through
 * @type: type of the event
 * @code: event code
 * @value: value of the event
 *
 * Similar to input_event() but will ignore the event if the device is
 * "grabbed" and the handle injecting the event is not the one that owns
 * the device.
 */
void input_inject_event(struct input_handle *handle,
			unsigned int type, unsigned int code, int value)
{
	struct input_dev *dev = handle->dev;
	struct input_handle *grab;
	unsigned long flags;

	if (is_event_supported(type, dev->evbit, EV_MAX)) {
		spin_lock_irqsave(&dev->event_lock, flags);

		rcu_read_lock();
		grab = rcu_dereference(dev->grab);
		if (!grab || grab == handle)
			input_handle_event(dev, type, code, value);
		rcu_read_unlock();

		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}
EXPORT_SYMBOL(input_inject_event);

/**
 * input_alloc_absinfo - allocates array of input_absinfo structs
 * @dev: the input device emitting absolute events
 *
 * If the absinfo struct the caller asked for is already allocated, this
 * function will not do anything.
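 *
 * Illustrative usage sketch (not part of the original kernel-doc): most
 * drivers do not call this directly; they rely on input_set_abs_params(),
 * defined below, which allocates the array before describing one axis:
 *
 *	input_set_abs_params(dev, ABS_X, 0, 1023, 4, 8);
 *	input_set_abs_params(dev, ABS_Y, 0, 767, 4, 8);
 *
 * The axis ranges above are made-up example values.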
448 */ 449 void input_alloc_absinfo(struct input_dev *dev) 450 { 451 if (dev->absinfo) 452 return; 453 454 dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL); 455 if (!dev->absinfo) { 456 dev_err(dev->dev.parent ?: &dev->dev, 457 "%s: unable to allocate memory\n", __func__); 458 /* 459 * We will handle this allocation failure in 460 * input_register_device() when we refuse to register input 461 * device with ABS bits but without absinfo. 462 */ 463 } 464 } 465 EXPORT_SYMBOL(input_alloc_absinfo); 466 467 void input_set_abs_params(struct input_dev *dev, unsigned int axis, 468 int min, int max, int fuzz, int flat) 469 { 470 struct input_absinfo *absinfo; 471 472 __set_bit(EV_ABS, dev->evbit); 473 __set_bit(axis, dev->absbit); 474 475 input_alloc_absinfo(dev); 476 if (!dev->absinfo) 477 return; 478 479 absinfo = &dev->absinfo[axis]; 480 absinfo->minimum = min; 481 absinfo->maximum = max; 482 absinfo->fuzz = fuzz; 483 absinfo->flat = flat; 484 } 485 EXPORT_SYMBOL(input_set_abs_params); 486 487 /** 488 * input_copy_abs - Copy absinfo from one input_dev to another 489 * @dst: Destination input device to copy the abs settings to 490 * @dst_axis: ABS_* value selecting the destination axis 491 * @src: Source input device to copy the abs settings from 492 * @src_axis: ABS_* value selecting the source axis 493 * 494 * Set absinfo for the selected destination axis by copying it from 495 * the specified source input device's source axis. 496 * This is useful to e.g. setup a pen/stylus input-device for combined 497 * touchscreen/pen hardware where the pen uses the same coordinates as 498 * the touchscreen. 499 */ 500 void input_copy_abs(struct input_dev *dst, unsigned int dst_axis, 501 const struct input_dev *src, unsigned int src_axis) 502 { 503 /* src must have EV_ABS and src_axis set */ 504 if (WARN_ON(!(test_bit(EV_ABS, src->evbit) && 505 test_bit(src_axis, src->absbit)))) 506 return; 507 508 /* 509 * input_alloc_absinfo() may have failed for the source. Our caller is 510 * expected to catch this when registering the input devices, which may 511 * happen after the input_copy_abs() call. 512 */ 513 if (!src->absinfo) 514 return; 515 516 input_set_capability(dst, EV_ABS, dst_axis); 517 if (!dst->absinfo) 518 return; 519 520 dst->absinfo[dst_axis] = src->absinfo[src_axis]; 521 } 522 EXPORT_SYMBOL(input_copy_abs); 523 524 /** 525 * input_grab_device - grabs device for exclusive use 526 * @handle: input handle that wants to own the device 527 * 528 * When a device is grabbed by an input handle all events generated by 529 * the device are delivered only to this handle. Also events injected 530 * by other input handles are ignored while device is grabbed. 
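 *
 * Illustrative usage sketch (an addition, not original kernel-doc): a
 * handler holding a handle can take and later drop exclusive ownership,
 * checking for -EBUSY when somebody else already grabbed the device:
 *
 *	error = input_grab_device(handle);
 *	if (error)
 *		return error;
 *	...
 *	input_release_device(handle);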
 */
int input_grab_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->grab) {
		retval = -EBUSY;
		goto out;
	}

	rcu_assign_pointer(dev->grab, handle);

 out:
	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_grab_device);

static void __input_release_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	struct input_handle *grabber;

	grabber = rcu_dereference_protected(dev->grab,
					    lockdep_is_held(&dev->mutex));
	if (grabber == handle) {
		rcu_assign_pointer(dev->grab, NULL);
		/* Make sure input_pass_values() notices that grab is gone */
		synchronize_rcu();

		list_for_each_entry(handle, &dev->h_list, d_node)
			if (handle->open && handle->handler->start)
				handle->handler->start(handle);
	}
}

/**
 * input_release_device - release previously grabbed device
 * @handle: input handle that owns the device
 *
 * Releases a previously grabbed device so that other input handles can
 * start receiving input events. Upon release, all handlers attached
 * to the device have their start() method called so they have a chance
 * to synchronize device state with the rest of the system.
 */
void input_release_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	mutex_lock(&dev->mutex);
	__input_release_device(handle);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_release_device);

/**
 * input_open_device - open input device
 * @handle: handle through which device is being accessed
 *
 * This function should be called by input handlers when they
 * want to start receiving events from the given input device.
 */
int input_open_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->going_away) {
		retval = -ENODEV;
		goto out;
	}

	handle->open++;

	if (dev->users++ || dev->inhibited) {
		/*
		 * Device is already opened and/or inhibited,
		 * so we can exit immediately and report success.
		 */
		goto out;
	}

	if (dev->open) {
		retval = dev->open(dev);
		if (retval) {
			dev->users--;
			handle->open--;
			/*
			 * Make sure we are not delivering any more events
			 * through this handle
			 */
			synchronize_rcu();
			goto out;
		}
	}

	if (dev->poller)
		input_dev_poller_start(dev->poller);

 out:
	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_open_device);

int input_flush_device(struct input_handle *handle, struct file *file)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->flush)
		retval = dev->flush(dev, file);

	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_flush_device);

/**
 * input_close_device - close input device
 * @handle: handle through which device is being accessed
 *
 * This function should be called by input handlers when they
 * want to stop receiving events from the given input device.
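 *
 * Illustrative pairing sketch (an addition, not original kernel-doc):
 * each successful input_open_device() call on a handle is balanced by
 * exactly one input_close_device() call, typically from the handler's
 * open and release paths:
 *
 *	error = input_open_device(handle);
 *	if (error)
 *		return error;
 *	...
 *	input_close_device(handle);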
 */
void input_close_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	mutex_lock(&dev->mutex);

	__input_release_device(handle);

	if (!--dev->users && !dev->inhibited) {
		if (dev->poller)
			input_dev_poller_stop(dev->poller);
		if (dev->close)
			dev->close(dev);
	}

	if (!--handle->open) {
		/*
		 * synchronize_rcu() makes sure that input_pass_values()
		 * completed and that no more input events are delivered
		 * through this handle
		 */
		synchronize_rcu();
	}

	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_close_device);

/*
 * Simulate keyup events for all keys that are marked as pressed.
 * The function must be called with dev->event_lock held.
 */
static bool input_dev_release_keys(struct input_dev *dev)
{
	bool need_sync = false;
	int code;

	lockdep_assert_held(&dev->event_lock);

	if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
		for_each_set_bit(code, dev->key, KEY_CNT) {
			input_handle_event(dev, EV_KEY, code, 0);
			need_sync = true;
		}
	}

	return need_sync;
}

/*
 * Prepare device for unregistering
 */
static void input_disconnect_device(struct input_dev *dev)
{
	struct input_handle *handle;

	/*
	 * Mark device as going away. Note that we take dev->mutex here
	 * not to protect access to dev->going_away but rather to ensure
	 * that there are no threads in the middle of input_open_device()
	 */
	mutex_lock(&dev->mutex);
	dev->going_away = true;
	mutex_unlock(&dev->mutex);

	spin_lock_irq(&dev->event_lock);

	/*
	 * Simulate keyup events for all pressed keys so that handlers
	 * are not left with "stuck" keys. The driver may continue to
	 * generate events even after we are done here, but they will
	 * not reach any handlers.
	 */
	if (input_dev_release_keys(dev))
		input_handle_event(dev, EV_SYN, SYN_REPORT, 1);

	list_for_each_entry(handle, &dev->h_list, d_node)
		handle->open = 0;

	spin_unlock_irq(&dev->event_lock);
}

/**
 * input_scancode_to_scalar() - converts scancode in &struct input_keymap_entry
 * @ke: keymap entry containing scancode to be converted.
 * @scancode: pointer to the location where converted scancode should
 *	be stored.
 *
 * This function is used to convert a scancode stored in &struct
 * input_keymap_entry into the scalar form understood by legacy keymap
 * handling methods. These methods expect scancodes to be represented
 * as 'unsigned int'.
 */
int input_scancode_to_scalar(const struct input_keymap_entry *ke,
			     unsigned int *scancode)
{
	switch (ke->len) {
	case 1:
		*scancode = *((u8 *)ke->scancode);
		break;

	case 2:
		*scancode = *((u16 *)ke->scancode);
		break;

	case 4:
		*scancode = *((u32 *)ke->scancode);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(input_scancode_to_scalar);

/*
 * Those routines handle the default case where no [gs]etkeycode() is
 * defined. In this case, an array indexed by the scancode is used.
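 *
 * Illustrative driver-side setup (a sketch with a made-up keymap name,
 * not part of the original comment): a keyboard driver points the device
 * at its keymap before registration so these defaults can operate on it:
 *
 *	static unsigned short mykbd_keycodes[128] = { [1] = KEY_ESC };
 *
 *	dev->keycode = mykbd_keycodes;
 *	dev->keycodesize = sizeof(mykbd_keycodes[0]);
 *	dev->keycodemax = ARRAY_SIZE(mykbd_keycodes);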
788 */ 789 790 static unsigned int input_fetch_keycode(struct input_dev *dev, 791 unsigned int index) 792 { 793 switch (dev->keycodesize) { 794 case 1: 795 return ((u8 *)dev->keycode)[index]; 796 797 case 2: 798 return ((u16 *)dev->keycode)[index]; 799 800 default: 801 return ((u32 *)dev->keycode)[index]; 802 } 803 } 804 805 static int input_default_getkeycode(struct input_dev *dev, 806 struct input_keymap_entry *ke) 807 { 808 unsigned int index; 809 int error; 810 811 if (!dev->keycodesize) 812 return -EINVAL; 813 814 if (ke->flags & INPUT_KEYMAP_BY_INDEX) 815 index = ke->index; 816 else { 817 error = input_scancode_to_scalar(ke, &index); 818 if (error) 819 return error; 820 } 821 822 if (index >= dev->keycodemax) 823 return -EINVAL; 824 825 ke->keycode = input_fetch_keycode(dev, index); 826 ke->index = index; 827 ke->len = sizeof(index); 828 memcpy(ke->scancode, &index, sizeof(index)); 829 830 return 0; 831 } 832 833 static int input_default_setkeycode(struct input_dev *dev, 834 const struct input_keymap_entry *ke, 835 unsigned int *old_keycode) 836 { 837 unsigned int index; 838 int error; 839 int i; 840 841 if (!dev->keycodesize) 842 return -EINVAL; 843 844 if (ke->flags & INPUT_KEYMAP_BY_INDEX) { 845 index = ke->index; 846 } else { 847 error = input_scancode_to_scalar(ke, &index); 848 if (error) 849 return error; 850 } 851 852 if (index >= dev->keycodemax) 853 return -EINVAL; 854 855 if (dev->keycodesize < sizeof(ke->keycode) && 856 (ke->keycode >> (dev->keycodesize * 8))) 857 return -EINVAL; 858 859 switch (dev->keycodesize) { 860 case 1: { 861 u8 *k = (u8 *)dev->keycode; 862 *old_keycode = k[index]; 863 k[index] = ke->keycode; 864 break; 865 } 866 case 2: { 867 u16 *k = (u16 *)dev->keycode; 868 *old_keycode = k[index]; 869 k[index] = ke->keycode; 870 break; 871 } 872 default: { 873 u32 *k = (u32 *)dev->keycode; 874 *old_keycode = k[index]; 875 k[index] = ke->keycode; 876 break; 877 } 878 } 879 880 if (*old_keycode <= KEY_MAX) { 881 __clear_bit(*old_keycode, dev->keybit); 882 for (i = 0; i < dev->keycodemax; i++) { 883 if (input_fetch_keycode(dev, i) == *old_keycode) { 884 __set_bit(*old_keycode, dev->keybit); 885 /* Setting the bit twice is useless, so break */ 886 break; 887 } 888 } 889 } 890 891 __set_bit(ke->keycode, dev->keybit); 892 return 0; 893 } 894 895 /** 896 * input_get_keycode - retrieve keycode currently mapped to a given scancode 897 * @dev: input device which keymap is being queried 898 * @ke: keymap entry 899 * 900 * This function should be called by anyone interested in retrieving current 901 * keymap. Presently evdev handlers use it. 902 */ 903 int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke) 904 { 905 unsigned long flags; 906 int retval; 907 908 spin_lock_irqsave(&dev->event_lock, flags); 909 retval = dev->getkeycode(dev, ke); 910 spin_unlock_irqrestore(&dev->event_lock, flags); 911 912 return retval; 913 } 914 EXPORT_SYMBOL(input_get_keycode); 915 916 /** 917 * input_set_keycode - attribute a keycode to a given scancode 918 * @dev: input device which keymap is being updated 919 * @ke: new keymap entry 920 * 921 * This function should be called by anyone needing to update current 922 * keymap. Presently keyboard and evdev handlers use it. 
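 *
 * Hedged example (illustrative values only): remapping scancode 0x1e to
 * KEY_A through this interface could look roughly like:
 *
 *	struct input_keymap_entry ke = {
 *		.keycode = KEY_A,
 *		.len = sizeof(u32),
 *	};
 *	u32 scancode = 0x1e;
 *
 *	memcpy(ke.scancode, &scancode, sizeof(scancode));
 *	error = input_set_keycode(dev, &ke);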
923 */ 924 int input_set_keycode(struct input_dev *dev, 925 const struct input_keymap_entry *ke) 926 { 927 unsigned long flags; 928 unsigned int old_keycode; 929 int retval; 930 931 if (ke->keycode > KEY_MAX) 932 return -EINVAL; 933 934 spin_lock_irqsave(&dev->event_lock, flags); 935 936 retval = dev->setkeycode(dev, ke, &old_keycode); 937 if (retval) 938 goto out; 939 940 /* Make sure KEY_RESERVED did not get enabled. */ 941 __clear_bit(KEY_RESERVED, dev->keybit); 942 943 /* 944 * Simulate keyup event if keycode is not present 945 * in the keymap anymore 946 */ 947 if (old_keycode > KEY_MAX) { 948 dev_warn(dev->dev.parent ?: &dev->dev, 949 "%s: got too big old keycode %#x\n", 950 __func__, old_keycode); 951 } else if (test_bit(EV_KEY, dev->evbit) && 952 !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && 953 __test_and_clear_bit(old_keycode, dev->key)) { 954 /* 955 * We have to use input_event_dispose() here directly instead 956 * of input_handle_event() because the key we want to release 957 * here is considered no longer supported by the device and 958 * input_handle_event() will ignore it. 959 */ 960 input_event_dispose(dev, INPUT_PASS_TO_HANDLERS, 961 EV_KEY, old_keycode, 0); 962 input_event_dispose(dev, INPUT_PASS_TO_HANDLERS | INPUT_FLUSH, 963 EV_SYN, SYN_REPORT, 1); 964 } 965 966 out: 967 spin_unlock_irqrestore(&dev->event_lock, flags); 968 969 return retval; 970 } 971 EXPORT_SYMBOL(input_set_keycode); 972 973 bool input_match_device_id(const struct input_dev *dev, 974 const struct input_device_id *id) 975 { 976 if (id->flags & INPUT_DEVICE_ID_MATCH_BUS) 977 if (id->bustype != dev->id.bustype) 978 return false; 979 980 if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR) 981 if (id->vendor != dev->id.vendor) 982 return false; 983 984 if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT) 985 if (id->product != dev->id.product) 986 return false; 987 988 if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION) 989 if (id->version != dev->id.version) 990 return false; 991 992 if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) || 993 !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) || 994 !bitmap_subset(id->relbit, dev->relbit, REL_MAX) || 995 !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) || 996 !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) || 997 !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) || 998 !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) || 999 !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) || 1000 !bitmap_subset(id->swbit, dev->swbit, SW_MAX) || 1001 !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) { 1002 return false; 1003 } 1004 1005 return true; 1006 } 1007 EXPORT_SYMBOL(input_match_device_id); 1008 1009 static const struct input_device_id *input_match_device(struct input_handler *handler, 1010 struct input_dev *dev) 1011 { 1012 const struct input_device_id *id; 1013 1014 for (id = handler->id_table; id->flags || id->driver_info; id++) { 1015 if (input_match_device_id(dev, id) && 1016 (!handler->match || handler->match(handler, dev))) { 1017 return id; 1018 } 1019 } 1020 1021 return NULL; 1022 } 1023 1024 static int input_attach_handler(struct input_dev *dev, struct input_handler *handler) 1025 { 1026 const struct input_device_id *id; 1027 int error; 1028 1029 id = input_match_device(handler, dev); 1030 if (!id) 1031 return -ENODEV; 1032 1033 error = handler->connect(handler, dev, id); 1034 if (error && error != -ENODEV) 1035 pr_err("failed to attach handler %s to device %s, error: %d\n", 1036 handler->name, kobject_name(&dev->dev.kobj), error); 1037 1038 return error; 1039 
} 1040 1041 #ifdef CONFIG_COMPAT 1042 1043 static int input_bits_to_string(char *buf, int buf_size, 1044 unsigned long bits, bool skip_empty) 1045 { 1046 int len = 0; 1047 1048 if (in_compat_syscall()) { 1049 u32 dword = bits >> 32; 1050 if (dword || !skip_empty) 1051 len += snprintf(buf, buf_size, "%x ", dword); 1052 1053 dword = bits & 0xffffffffUL; 1054 if (dword || !skip_empty || len) 1055 len += snprintf(buf + len, max(buf_size - len, 0), 1056 "%x", dword); 1057 } else { 1058 if (bits || !skip_empty) 1059 len += snprintf(buf, buf_size, "%lx", bits); 1060 } 1061 1062 return len; 1063 } 1064 1065 #else /* !CONFIG_COMPAT */ 1066 1067 static int input_bits_to_string(char *buf, int buf_size, 1068 unsigned long bits, bool skip_empty) 1069 { 1070 return bits || !skip_empty ? 1071 snprintf(buf, buf_size, "%lx", bits) : 0; 1072 } 1073 1074 #endif 1075 1076 #ifdef CONFIG_PROC_FS 1077 1078 static struct proc_dir_entry *proc_bus_input_dir; 1079 static DECLARE_WAIT_QUEUE_HEAD(input_devices_poll_wait); 1080 static int input_devices_state; 1081 1082 static inline void input_wakeup_procfs_readers(void) 1083 { 1084 input_devices_state++; 1085 wake_up(&input_devices_poll_wait); 1086 } 1087 1088 static __poll_t input_proc_devices_poll(struct file *file, poll_table *wait) 1089 { 1090 poll_wait(file, &input_devices_poll_wait, wait); 1091 if (file->f_version != input_devices_state) { 1092 file->f_version = input_devices_state; 1093 return EPOLLIN | EPOLLRDNORM; 1094 } 1095 1096 return 0; 1097 } 1098 1099 union input_seq_state { 1100 struct { 1101 unsigned short pos; 1102 bool mutex_acquired; 1103 }; 1104 void *p; 1105 }; 1106 1107 static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos) 1108 { 1109 union input_seq_state *state = (union input_seq_state *)&seq->private; 1110 int error; 1111 1112 /* We need to fit into seq->private pointer */ 1113 BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private)); 1114 1115 error = mutex_lock_interruptible(&input_mutex); 1116 if (error) { 1117 state->mutex_acquired = false; 1118 return ERR_PTR(error); 1119 } 1120 1121 state->mutex_acquired = true; 1122 1123 return seq_list_start(&input_dev_list, *pos); 1124 } 1125 1126 static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos) 1127 { 1128 return seq_list_next(v, &input_dev_list, pos); 1129 } 1130 1131 static void input_seq_stop(struct seq_file *seq, void *v) 1132 { 1133 union input_seq_state *state = (union input_seq_state *)&seq->private; 1134 1135 if (state->mutex_acquired) 1136 mutex_unlock(&input_mutex); 1137 } 1138 1139 static void input_seq_print_bitmap(struct seq_file *seq, const char *name, 1140 unsigned long *bitmap, int max) 1141 { 1142 int i; 1143 bool skip_empty = true; 1144 char buf[18]; 1145 1146 seq_printf(seq, "B: %s=", name); 1147 1148 for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) { 1149 if (input_bits_to_string(buf, sizeof(buf), 1150 bitmap[i], skip_empty)) { 1151 skip_empty = false; 1152 seq_printf(seq, "%s%s", buf, i > 0 ? " " : ""); 1153 } 1154 } 1155 1156 /* 1157 * If no output was produced print a single 0. 
1158 */ 1159 if (skip_empty) 1160 seq_putc(seq, '0'); 1161 1162 seq_putc(seq, '\n'); 1163 } 1164 1165 static int input_devices_seq_show(struct seq_file *seq, void *v) 1166 { 1167 struct input_dev *dev = container_of(v, struct input_dev, node); 1168 const char *path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL); 1169 struct input_handle *handle; 1170 1171 seq_printf(seq, "I: Bus=%04x Vendor=%04x Product=%04x Version=%04x\n", 1172 dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version); 1173 1174 seq_printf(seq, "N: Name=\"%s\"\n", dev->name ? dev->name : ""); 1175 seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : ""); 1176 seq_printf(seq, "S: Sysfs=%s\n", path ? path : ""); 1177 seq_printf(seq, "U: Uniq=%s\n", dev->uniq ? dev->uniq : ""); 1178 seq_puts(seq, "H: Handlers="); 1179 1180 list_for_each_entry(handle, &dev->h_list, d_node) 1181 seq_printf(seq, "%s ", handle->name); 1182 seq_putc(seq, '\n'); 1183 1184 input_seq_print_bitmap(seq, "PROP", dev->propbit, INPUT_PROP_MAX); 1185 1186 input_seq_print_bitmap(seq, "EV", dev->evbit, EV_MAX); 1187 if (test_bit(EV_KEY, dev->evbit)) 1188 input_seq_print_bitmap(seq, "KEY", dev->keybit, KEY_MAX); 1189 if (test_bit(EV_REL, dev->evbit)) 1190 input_seq_print_bitmap(seq, "REL", dev->relbit, REL_MAX); 1191 if (test_bit(EV_ABS, dev->evbit)) 1192 input_seq_print_bitmap(seq, "ABS", dev->absbit, ABS_MAX); 1193 if (test_bit(EV_MSC, dev->evbit)) 1194 input_seq_print_bitmap(seq, "MSC", dev->mscbit, MSC_MAX); 1195 if (test_bit(EV_LED, dev->evbit)) 1196 input_seq_print_bitmap(seq, "LED", dev->ledbit, LED_MAX); 1197 if (test_bit(EV_SND, dev->evbit)) 1198 input_seq_print_bitmap(seq, "SND", dev->sndbit, SND_MAX); 1199 if (test_bit(EV_FF, dev->evbit)) 1200 input_seq_print_bitmap(seq, "FF", dev->ffbit, FF_MAX); 1201 if (test_bit(EV_SW, dev->evbit)) 1202 input_seq_print_bitmap(seq, "SW", dev->swbit, SW_MAX); 1203 1204 seq_putc(seq, '\n'); 1205 1206 kfree(path); 1207 return 0; 1208 } 1209 1210 static const struct seq_operations input_devices_seq_ops = { 1211 .start = input_devices_seq_start, 1212 .next = input_devices_seq_next, 1213 .stop = input_seq_stop, 1214 .show = input_devices_seq_show, 1215 }; 1216 1217 static int input_proc_devices_open(struct inode *inode, struct file *file) 1218 { 1219 return seq_open(file, &input_devices_seq_ops); 1220 } 1221 1222 static const struct proc_ops input_devices_proc_ops = { 1223 .proc_open = input_proc_devices_open, 1224 .proc_poll = input_proc_devices_poll, 1225 .proc_read = seq_read, 1226 .proc_lseek = seq_lseek, 1227 .proc_release = seq_release, 1228 }; 1229 1230 static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos) 1231 { 1232 union input_seq_state *state = (union input_seq_state *)&seq->private; 1233 int error; 1234 1235 /* We need to fit into seq->private pointer */ 1236 BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private)); 1237 1238 error = mutex_lock_interruptible(&input_mutex); 1239 if (error) { 1240 state->mutex_acquired = false; 1241 return ERR_PTR(error); 1242 } 1243 1244 state->mutex_acquired = true; 1245 state->pos = *pos; 1246 1247 return seq_list_start(&input_handler_list, *pos); 1248 } 1249 1250 static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos) 1251 { 1252 union input_seq_state *state = (union input_seq_state *)&seq->private; 1253 1254 state->pos = *pos + 1; 1255 return seq_list_next(v, &input_handler_list, pos); 1256 } 1257 1258 static int input_handlers_seq_show(struct seq_file *seq, void *v) 1259 { 1260 struct input_handler 
*handler = container_of(v, struct input_handler, node); 1261 union input_seq_state *state = (union input_seq_state *)&seq->private; 1262 1263 seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name); 1264 if (handler->filter) 1265 seq_puts(seq, " (filter)"); 1266 if (handler->legacy_minors) 1267 seq_printf(seq, " Minor=%d", handler->minor); 1268 seq_putc(seq, '\n'); 1269 1270 return 0; 1271 } 1272 1273 static const struct seq_operations input_handlers_seq_ops = { 1274 .start = input_handlers_seq_start, 1275 .next = input_handlers_seq_next, 1276 .stop = input_seq_stop, 1277 .show = input_handlers_seq_show, 1278 }; 1279 1280 static int input_proc_handlers_open(struct inode *inode, struct file *file) 1281 { 1282 return seq_open(file, &input_handlers_seq_ops); 1283 } 1284 1285 static const struct proc_ops input_handlers_proc_ops = { 1286 .proc_open = input_proc_handlers_open, 1287 .proc_read = seq_read, 1288 .proc_lseek = seq_lseek, 1289 .proc_release = seq_release, 1290 }; 1291 1292 static int __init input_proc_init(void) 1293 { 1294 struct proc_dir_entry *entry; 1295 1296 proc_bus_input_dir = proc_mkdir("bus/input", NULL); 1297 if (!proc_bus_input_dir) 1298 return -ENOMEM; 1299 1300 entry = proc_create("devices", 0, proc_bus_input_dir, 1301 &input_devices_proc_ops); 1302 if (!entry) 1303 goto fail1; 1304 1305 entry = proc_create("handlers", 0, proc_bus_input_dir, 1306 &input_handlers_proc_ops); 1307 if (!entry) 1308 goto fail2; 1309 1310 return 0; 1311 1312 fail2: remove_proc_entry("devices", proc_bus_input_dir); 1313 fail1: remove_proc_entry("bus/input", NULL); 1314 return -ENOMEM; 1315 } 1316 1317 static void input_proc_exit(void) 1318 { 1319 remove_proc_entry("devices", proc_bus_input_dir); 1320 remove_proc_entry("handlers", proc_bus_input_dir); 1321 remove_proc_entry("bus/input", NULL); 1322 } 1323 1324 #else /* !CONFIG_PROC_FS */ 1325 static inline void input_wakeup_procfs_readers(void) { } 1326 static inline int input_proc_init(void) { return 0; } 1327 static inline void input_proc_exit(void) { } 1328 #endif 1329 1330 #define INPUT_DEV_STRING_ATTR_SHOW(name) \ 1331 static ssize_t input_dev_show_##name(struct device *dev, \ 1332 struct device_attribute *attr, \ 1333 char *buf) \ 1334 { \ 1335 struct input_dev *input_dev = to_input_dev(dev); \ 1336 \ 1337 return sysfs_emit(buf, "%s\n", \ 1338 input_dev->name ? input_dev->name : ""); \ 1339 } \ 1340 static DEVICE_ATTR(name, S_IRUGO, input_dev_show_##name, NULL) 1341 1342 INPUT_DEV_STRING_ATTR_SHOW(name); 1343 INPUT_DEV_STRING_ATTR_SHOW(phys); 1344 INPUT_DEV_STRING_ATTR_SHOW(uniq); 1345 1346 static int input_print_modalias_bits(char *buf, int size, 1347 char name, const unsigned long *bm, 1348 unsigned int min_bit, unsigned int max_bit) 1349 { 1350 int bit = min_bit; 1351 int len = 0; 1352 1353 len += snprintf(buf, max(size, 0), "%c", name); 1354 for_each_set_bit_from(bit, bm, max_bit) 1355 len += snprintf(buf + len, max(size - len, 0), "%X,", bit); 1356 return len; 1357 } 1358 1359 static int input_print_modalias_parts(char *buf, int size, int full_len, 1360 const struct input_dev *id) 1361 { 1362 int len, klen, remainder, space; 1363 1364 len = snprintf(buf, max(size, 0), 1365 "input:b%04Xv%04Xp%04Xe%04X-", 1366 id->id.bustype, id->id.vendor, 1367 id->id.product, id->id.version); 1368 1369 len += input_print_modalias_bits(buf + len, size - len, 1370 'e', id->evbit, 0, EV_MAX); 1371 1372 /* 1373 * Calculate the remaining space in the buffer making sure we 1374 * have place for the terminating 0. 
 */
	space = max(size - (len + 1), 0);

	klen = input_print_modalias_bits(buf + len, size - len,
				'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX);
	len += klen;

	/*
	 * If we have more data than we can fit in the buffer, check
	 * if we can trim key data to fit in the rest. We will indicate
	 * that key data is incomplete by adding "+" sign at the end, like
	 * this: "k1,2,3,45,+,".
	 *
	 * Note that the shortest key info (if present) is "k+," so we
	 * can only try to trim if key data is longer than that.
	 */
	if (full_len && size < full_len + 1 && klen > 3) {
		remainder = full_len - len;
		/*
		 * We can only trim if we have space for the remainder
		 * and also for at least "k+," which is 3 more characters.
		 */
		if (remainder <= space - 3) {
			/*
			 * We are guaranteed to have 'k' in the buffer, so
			 * we need at least 3 additional bytes for storing
			 * "+," in addition to the remainder.
			 */
			for (int i = size - 1 - remainder - 3; i >= 0; i--) {
				if (buf[i] == 'k' || buf[i] == ',') {
					strcpy(buf + i + 1, "+,");
					len = i + 3; /* Not counting '\0' */
					break;
				}
			}
		}
	}

	len += input_print_modalias_bits(buf + len, size - len,
				'r', id->relbit, 0, REL_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'a', id->absbit, 0, ABS_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'm', id->mscbit, 0, MSC_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'l', id->ledbit, 0, LED_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				's', id->sndbit, 0, SND_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'f', id->ffbit, 0, FF_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'w', id->swbit, 0, SW_MAX);

	return len;
}

static int input_print_modalias(char *buf, int size, const struct input_dev *id)
{
	int full_len;

	/*
	 * Printing is done in 2 passes: the first one figures out the total
	 * length needed for the modalias string, the second one will try to
	 * trim key data in case the buffer is too small for the entire
	 * modalias. If the buffer is too small regardless, it will fill as
	 * much as it can (without trimming key data) into the buffer and
	 * leave it to the caller to figure out what to do with the result.
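	 *
	 * (Illustrative note, not part of the original comment: the result
	 * has the general shape "input:b0019v0000p0001e0000-e0,1,k74,ramlsfw",
	 * i.e. bus/vendor/product/version followed by one letter-prefixed
	 * list of set bits per event type.)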
1442 */ 1443 full_len = input_print_modalias_parts(NULL, 0, 0, id); 1444 return input_print_modalias_parts(buf, size, full_len, id); 1445 } 1446 1447 static ssize_t input_dev_show_modalias(struct device *dev, 1448 struct device_attribute *attr, 1449 char *buf) 1450 { 1451 struct input_dev *id = to_input_dev(dev); 1452 ssize_t len; 1453 1454 len = input_print_modalias(buf, PAGE_SIZE, id); 1455 if (len < PAGE_SIZE - 2) 1456 len += snprintf(buf + len, PAGE_SIZE - len, "\n"); 1457 1458 return min_t(int, len, PAGE_SIZE); 1459 } 1460 static DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL); 1461 1462 static int input_print_bitmap(char *buf, int buf_size, const unsigned long *bitmap, 1463 int max, int add_cr); 1464 1465 static ssize_t input_dev_show_properties(struct device *dev, 1466 struct device_attribute *attr, 1467 char *buf) 1468 { 1469 struct input_dev *input_dev = to_input_dev(dev); 1470 int len = input_print_bitmap(buf, PAGE_SIZE, input_dev->propbit, 1471 INPUT_PROP_MAX, true); 1472 return min_t(int, len, PAGE_SIZE); 1473 } 1474 static DEVICE_ATTR(properties, S_IRUGO, input_dev_show_properties, NULL); 1475 1476 static int input_inhibit_device(struct input_dev *dev); 1477 static int input_uninhibit_device(struct input_dev *dev); 1478 1479 static ssize_t inhibited_show(struct device *dev, 1480 struct device_attribute *attr, 1481 char *buf) 1482 { 1483 struct input_dev *input_dev = to_input_dev(dev); 1484 1485 return sysfs_emit(buf, "%d\n", input_dev->inhibited); 1486 } 1487 1488 static ssize_t inhibited_store(struct device *dev, 1489 struct device_attribute *attr, const char *buf, 1490 size_t len) 1491 { 1492 struct input_dev *input_dev = to_input_dev(dev); 1493 ssize_t rv; 1494 bool inhibited; 1495 1496 if (kstrtobool(buf, &inhibited)) 1497 return -EINVAL; 1498 1499 if (inhibited) 1500 rv = input_inhibit_device(input_dev); 1501 else 1502 rv = input_uninhibit_device(input_dev); 1503 1504 if (rv != 0) 1505 return rv; 1506 1507 return len; 1508 } 1509 1510 static DEVICE_ATTR_RW(inhibited); 1511 1512 static struct attribute *input_dev_attrs[] = { 1513 &dev_attr_name.attr, 1514 &dev_attr_phys.attr, 1515 &dev_attr_uniq.attr, 1516 &dev_attr_modalias.attr, 1517 &dev_attr_properties.attr, 1518 &dev_attr_inhibited.attr, 1519 NULL 1520 }; 1521 1522 static const struct attribute_group input_dev_attr_group = { 1523 .attrs = input_dev_attrs, 1524 }; 1525 1526 #define INPUT_DEV_ID_ATTR(name) \ 1527 static ssize_t input_dev_show_id_##name(struct device *dev, \ 1528 struct device_attribute *attr, \ 1529 char *buf) \ 1530 { \ 1531 struct input_dev *input_dev = to_input_dev(dev); \ 1532 return sysfs_emit(buf, "%04x\n", input_dev->id.name); \ 1533 } \ 1534 static DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL) 1535 1536 INPUT_DEV_ID_ATTR(bustype); 1537 INPUT_DEV_ID_ATTR(vendor); 1538 INPUT_DEV_ID_ATTR(product); 1539 INPUT_DEV_ID_ATTR(version); 1540 1541 static struct attribute *input_dev_id_attrs[] = { 1542 &dev_attr_bustype.attr, 1543 &dev_attr_vendor.attr, 1544 &dev_attr_product.attr, 1545 &dev_attr_version.attr, 1546 NULL 1547 }; 1548 1549 static const struct attribute_group input_dev_id_attr_group = { 1550 .name = "id", 1551 .attrs = input_dev_id_attrs, 1552 }; 1553 1554 static int input_print_bitmap(char *buf, int buf_size, const unsigned long *bitmap, 1555 int max, int add_cr) 1556 { 1557 int i; 1558 int len = 0; 1559 bool skip_empty = true; 1560 1561 for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) { 1562 len += input_bits_to_string(buf + len, max(buf_size - len, 0), 1563 
bitmap[i], skip_empty); 1564 if (len) { 1565 skip_empty = false; 1566 if (i > 0) 1567 len += snprintf(buf + len, max(buf_size - len, 0), " "); 1568 } 1569 } 1570 1571 /* 1572 * If no output was produced print a single 0. 1573 */ 1574 if (len == 0) 1575 len = snprintf(buf, buf_size, "%d", 0); 1576 1577 if (add_cr) 1578 len += snprintf(buf + len, max(buf_size - len, 0), "\n"); 1579 1580 return len; 1581 } 1582 1583 #define INPUT_DEV_CAP_ATTR(ev, bm) \ 1584 static ssize_t input_dev_show_cap_##bm(struct device *dev, \ 1585 struct device_attribute *attr, \ 1586 char *buf) \ 1587 { \ 1588 struct input_dev *input_dev = to_input_dev(dev); \ 1589 int len = input_print_bitmap(buf, PAGE_SIZE, \ 1590 input_dev->bm##bit, ev##_MAX, \ 1591 true); \ 1592 return min_t(int, len, PAGE_SIZE); \ 1593 } \ 1594 static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL) 1595 1596 INPUT_DEV_CAP_ATTR(EV, ev); 1597 INPUT_DEV_CAP_ATTR(KEY, key); 1598 INPUT_DEV_CAP_ATTR(REL, rel); 1599 INPUT_DEV_CAP_ATTR(ABS, abs); 1600 INPUT_DEV_CAP_ATTR(MSC, msc); 1601 INPUT_DEV_CAP_ATTR(LED, led); 1602 INPUT_DEV_CAP_ATTR(SND, snd); 1603 INPUT_DEV_CAP_ATTR(FF, ff); 1604 INPUT_DEV_CAP_ATTR(SW, sw); 1605 1606 static struct attribute *input_dev_caps_attrs[] = { 1607 &dev_attr_ev.attr, 1608 &dev_attr_key.attr, 1609 &dev_attr_rel.attr, 1610 &dev_attr_abs.attr, 1611 &dev_attr_msc.attr, 1612 &dev_attr_led.attr, 1613 &dev_attr_snd.attr, 1614 &dev_attr_ff.attr, 1615 &dev_attr_sw.attr, 1616 NULL 1617 }; 1618 1619 static const struct attribute_group input_dev_caps_attr_group = { 1620 .name = "capabilities", 1621 .attrs = input_dev_caps_attrs, 1622 }; 1623 1624 static const struct attribute_group *input_dev_attr_groups[] = { 1625 &input_dev_attr_group, 1626 &input_dev_id_attr_group, 1627 &input_dev_caps_attr_group, 1628 &input_poller_attribute_group, 1629 NULL 1630 }; 1631 1632 static void input_dev_release(struct device *device) 1633 { 1634 struct input_dev *dev = to_input_dev(device); 1635 1636 input_ff_destroy(dev); 1637 input_mt_destroy_slots(dev); 1638 kfree(dev->poller); 1639 kfree(dev->absinfo); 1640 kfree(dev->vals); 1641 kfree(dev); 1642 1643 module_put(THIS_MODULE); 1644 } 1645 1646 /* 1647 * Input uevent interface - loading event handlers based on 1648 * device bitfields. 1649 */ 1650 static int input_add_uevent_bm_var(struct kobj_uevent_env *env, 1651 const char *name, const unsigned long *bitmap, int max) 1652 { 1653 int len; 1654 1655 if (add_uevent_var(env, "%s", name)) 1656 return -ENOMEM; 1657 1658 len = input_print_bitmap(&env->buf[env->buflen - 1], 1659 sizeof(env->buf) - env->buflen, 1660 bitmap, max, false); 1661 if (len >= (sizeof(env->buf) - env->buflen)) 1662 return -ENOMEM; 1663 1664 env->buflen += len; 1665 return 0; 1666 } 1667 1668 /* 1669 * This is a pretty gross hack. When building uevent data the driver core 1670 * may try adding more environment variables to kobj_uevent_env without 1671 * telling us, so we have no idea how much of the buffer we can use to 1672 * avoid overflows/-ENOMEM elsewhere. To work around this let's artificially 1673 * reduce amount of memory we will use for the modalias environment variable. 1674 * 1675 * The potential additions are: 1676 * 1677 * SEQNUM=18446744073709551615 - (%llu - 28 bytes) 1678 * HOME=/ (6 bytes) 1679 * PATH=/sbin:/bin:/usr/sbin:/usr/bin (34 bytes) 1680 * 1681 * 68 bytes total. 
Allow extra buffer - 96 bytes 1682 */ 1683 #define UEVENT_ENV_EXTRA_LEN 96 1684 1685 static int input_add_uevent_modalias_var(struct kobj_uevent_env *env, 1686 const struct input_dev *dev) 1687 { 1688 int len; 1689 1690 if (add_uevent_var(env, "MODALIAS=")) 1691 return -ENOMEM; 1692 1693 len = input_print_modalias(&env->buf[env->buflen - 1], 1694 (int)sizeof(env->buf) - env->buflen - 1695 UEVENT_ENV_EXTRA_LEN, 1696 dev); 1697 if (len >= ((int)sizeof(env->buf) - env->buflen - 1698 UEVENT_ENV_EXTRA_LEN)) 1699 return -ENOMEM; 1700 1701 env->buflen += len; 1702 return 0; 1703 } 1704 1705 #define INPUT_ADD_HOTPLUG_VAR(fmt, val...) \ 1706 do { \ 1707 int err = add_uevent_var(env, fmt, val); \ 1708 if (err) \ 1709 return err; \ 1710 } while (0) 1711 1712 #define INPUT_ADD_HOTPLUG_BM_VAR(name, bm, max) \ 1713 do { \ 1714 int err = input_add_uevent_bm_var(env, name, bm, max); \ 1715 if (err) \ 1716 return err; \ 1717 } while (0) 1718 1719 #define INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev) \ 1720 do { \ 1721 int err = input_add_uevent_modalias_var(env, dev); \ 1722 if (err) \ 1723 return err; \ 1724 } while (0) 1725 1726 static int input_dev_uevent(const struct device *device, struct kobj_uevent_env *env) 1727 { 1728 const struct input_dev *dev = to_input_dev(device); 1729 1730 INPUT_ADD_HOTPLUG_VAR("PRODUCT=%x/%x/%x/%x", 1731 dev->id.bustype, dev->id.vendor, 1732 dev->id.product, dev->id.version); 1733 if (dev->name) 1734 INPUT_ADD_HOTPLUG_VAR("NAME=\"%s\"", dev->name); 1735 if (dev->phys) 1736 INPUT_ADD_HOTPLUG_VAR("PHYS=\"%s\"", dev->phys); 1737 if (dev->uniq) 1738 INPUT_ADD_HOTPLUG_VAR("UNIQ=\"%s\"", dev->uniq); 1739 1740 INPUT_ADD_HOTPLUG_BM_VAR("PROP=", dev->propbit, INPUT_PROP_MAX); 1741 1742 INPUT_ADD_HOTPLUG_BM_VAR("EV=", dev->evbit, EV_MAX); 1743 if (test_bit(EV_KEY, dev->evbit)) 1744 INPUT_ADD_HOTPLUG_BM_VAR("KEY=", dev->keybit, KEY_MAX); 1745 if (test_bit(EV_REL, dev->evbit)) 1746 INPUT_ADD_HOTPLUG_BM_VAR("REL=", dev->relbit, REL_MAX); 1747 if (test_bit(EV_ABS, dev->evbit)) 1748 INPUT_ADD_HOTPLUG_BM_VAR("ABS=", dev->absbit, ABS_MAX); 1749 if (test_bit(EV_MSC, dev->evbit)) 1750 INPUT_ADD_HOTPLUG_BM_VAR("MSC=", dev->mscbit, MSC_MAX); 1751 if (test_bit(EV_LED, dev->evbit)) 1752 INPUT_ADD_HOTPLUG_BM_VAR("LED=", dev->ledbit, LED_MAX); 1753 if (test_bit(EV_SND, dev->evbit)) 1754 INPUT_ADD_HOTPLUG_BM_VAR("SND=", dev->sndbit, SND_MAX); 1755 if (test_bit(EV_FF, dev->evbit)) 1756 INPUT_ADD_HOTPLUG_BM_VAR("FF=", dev->ffbit, FF_MAX); 1757 if (test_bit(EV_SW, dev->evbit)) 1758 INPUT_ADD_HOTPLUG_BM_VAR("SW=", dev->swbit, SW_MAX); 1759 1760 INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev); 1761 1762 return 0; 1763 } 1764 1765 #define INPUT_DO_TOGGLE(dev, type, bits, on) \ 1766 do { \ 1767 int i; \ 1768 bool active; \ 1769 \ 1770 if (!test_bit(EV_##type, dev->evbit)) \ 1771 break; \ 1772 \ 1773 for_each_set_bit(i, dev->bits##bit, type##_CNT) { \ 1774 active = test_bit(i, dev->bits); \ 1775 if (!active && !on) \ 1776 continue; \ 1777 \ 1778 dev->event(dev, EV_##type, i, on ? 
active : 0); \ 1779 } \ 1780 } while (0) 1781 1782 static void input_dev_toggle(struct input_dev *dev, bool activate) 1783 { 1784 if (!dev->event) 1785 return; 1786 1787 INPUT_DO_TOGGLE(dev, LED, led, activate); 1788 INPUT_DO_TOGGLE(dev, SND, snd, activate); 1789 1790 if (activate && test_bit(EV_REP, dev->evbit)) { 1791 dev->event(dev, EV_REP, REP_PERIOD, dev->rep[REP_PERIOD]); 1792 dev->event(dev, EV_REP, REP_DELAY, dev->rep[REP_DELAY]); 1793 } 1794 } 1795 1796 /** 1797 * input_reset_device() - reset/restore the state of input device 1798 * @dev: input device whose state needs to be reset 1799 * 1800 * This function tries to reset the state of an opened input device and 1801 * bring internal state and state if the hardware in sync with each other. 1802 * We mark all keys as released, restore LED state, repeat rate, etc. 1803 */ 1804 void input_reset_device(struct input_dev *dev) 1805 { 1806 unsigned long flags; 1807 1808 mutex_lock(&dev->mutex); 1809 spin_lock_irqsave(&dev->event_lock, flags); 1810 1811 input_dev_toggle(dev, true); 1812 if (input_dev_release_keys(dev)) 1813 input_handle_event(dev, EV_SYN, SYN_REPORT, 1); 1814 1815 spin_unlock_irqrestore(&dev->event_lock, flags); 1816 mutex_unlock(&dev->mutex); 1817 } 1818 EXPORT_SYMBOL(input_reset_device); 1819 1820 static int input_inhibit_device(struct input_dev *dev) 1821 { 1822 mutex_lock(&dev->mutex); 1823 1824 if (dev->inhibited) 1825 goto out; 1826 1827 if (dev->users) { 1828 if (dev->close) 1829 dev->close(dev); 1830 if (dev->poller) 1831 input_dev_poller_stop(dev->poller); 1832 } 1833 1834 spin_lock_irq(&dev->event_lock); 1835 input_mt_release_slots(dev); 1836 input_dev_release_keys(dev); 1837 input_handle_event(dev, EV_SYN, SYN_REPORT, 1); 1838 input_dev_toggle(dev, false); 1839 spin_unlock_irq(&dev->event_lock); 1840 1841 dev->inhibited = true; 1842 1843 out: 1844 mutex_unlock(&dev->mutex); 1845 return 0; 1846 } 1847 1848 static int input_uninhibit_device(struct input_dev *dev) 1849 { 1850 int ret = 0; 1851 1852 mutex_lock(&dev->mutex); 1853 1854 if (!dev->inhibited) 1855 goto out; 1856 1857 if (dev->users) { 1858 if (dev->open) { 1859 ret = dev->open(dev); 1860 if (ret) 1861 goto out; 1862 } 1863 if (dev->poller) 1864 input_dev_poller_start(dev->poller); 1865 } 1866 1867 dev->inhibited = false; 1868 spin_lock_irq(&dev->event_lock); 1869 input_dev_toggle(dev, true); 1870 spin_unlock_irq(&dev->event_lock); 1871 1872 out: 1873 mutex_unlock(&dev->mutex); 1874 return ret; 1875 } 1876 1877 static int input_dev_suspend(struct device *dev) 1878 { 1879 struct input_dev *input_dev = to_input_dev(dev); 1880 1881 spin_lock_irq(&input_dev->event_lock); 1882 1883 /* 1884 * Keys that are pressed now are unlikely to be 1885 * still pressed when we resume. 1886 */ 1887 if (input_dev_release_keys(input_dev)) 1888 input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1); 1889 1890 /* Turn off LEDs and sounds, if any are active. */ 1891 input_dev_toggle(input_dev, false); 1892 1893 spin_unlock_irq(&input_dev->event_lock); 1894 1895 return 0; 1896 } 1897 1898 static int input_dev_resume(struct device *dev) 1899 { 1900 struct input_dev *input_dev = to_input_dev(dev); 1901 1902 spin_lock_irq(&input_dev->event_lock); 1903 1904 /* Restore state of LEDs and sounds, if any were active. 
*/ 1905 input_dev_toggle(input_dev, true); 1906 1907 spin_unlock_irq(&input_dev->event_lock); 1908 1909 return 0; 1910 } 1911 1912 static int input_dev_freeze(struct device *dev) 1913 { 1914 struct input_dev *input_dev = to_input_dev(dev); 1915 1916 spin_lock_irq(&input_dev->event_lock); 1917 1918 /* 1919 * Keys that are pressed now are unlikely to be 1920 * still pressed when we resume. 1921 */ 1922 if (input_dev_release_keys(input_dev)) 1923 input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1); 1924 1925 spin_unlock_irq(&input_dev->event_lock); 1926 1927 return 0; 1928 } 1929 1930 static int input_dev_poweroff(struct device *dev) 1931 { 1932 struct input_dev *input_dev = to_input_dev(dev); 1933 1934 spin_lock_irq(&input_dev->event_lock); 1935 1936 /* Turn off LEDs and sounds, if any are active. */ 1937 input_dev_toggle(input_dev, false); 1938 1939 spin_unlock_irq(&input_dev->event_lock); 1940 1941 return 0; 1942 } 1943 1944 static const struct dev_pm_ops input_dev_pm_ops = { 1945 .suspend = input_dev_suspend, 1946 .resume = input_dev_resume, 1947 .freeze = input_dev_freeze, 1948 .poweroff = input_dev_poweroff, 1949 .restore = input_dev_resume, 1950 }; 1951 1952 static const struct device_type input_dev_type = { 1953 .groups = input_dev_attr_groups, 1954 .release = input_dev_release, 1955 .uevent = input_dev_uevent, 1956 .pm = pm_sleep_ptr(&input_dev_pm_ops), 1957 }; 1958 1959 static char *input_devnode(const struct device *dev, umode_t *mode) 1960 { 1961 return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev)); 1962 } 1963 1964 const struct class input_class = { 1965 .name = "input", 1966 .devnode = input_devnode, 1967 }; 1968 EXPORT_SYMBOL_GPL(input_class); 1969 1970 /** 1971 * input_allocate_device - allocate memory for new input device 1972 * 1973 * Returns prepared struct input_dev or %NULL. 1974 * 1975 * NOTE: Use input_free_device() to free devices that have not been 1976 * registered; input_unregister_device() should be used for already 1977 * registered devices. 1978 */ 1979 struct input_dev *input_allocate_device(void) 1980 { 1981 static atomic_t input_no = ATOMIC_INIT(-1); 1982 struct input_dev *dev; 1983 1984 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1985 if (!dev) 1986 return NULL; 1987 1988 mutex_init(&dev->mutex); 1989 spin_lock_init(&dev->event_lock); 1990 timer_setup(&dev->timer, NULL, 0); 1991 INIT_LIST_HEAD(&dev->h_list); 1992 INIT_LIST_HEAD(&dev->node); 1993 1994 dev->dev.type = &input_dev_type; 1995 dev->dev.class = &input_class; 1996 device_initialize(&dev->dev); 1997 /* 1998 * From this point on we can no longer simply "kfree(dev)", we need 1999 * to use input_free_device() so that device core properly frees its 2000 * resources associated with the input device. 
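	 *
	 * (Illustrative note, not part of the original comment: from this
	 * point on, driver error paths must call input_free_device(dev)
	 * rather than kfree(dev).)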
2001 */ 2002 2003 dev_set_name(&dev->dev, "input%lu", 2004 (unsigned long)atomic_inc_return(&input_no)); 2005 2006 __module_get(THIS_MODULE); 2007 2008 return dev; 2009 } 2010 EXPORT_SYMBOL(input_allocate_device); 2011 2012 struct input_devres { 2013 struct input_dev *input; 2014 }; 2015 2016 static int devm_input_device_match(struct device *dev, void *res, void *data) 2017 { 2018 struct input_devres *devres = res; 2019 2020 return devres->input == data; 2021 } 2022 2023 static void devm_input_device_release(struct device *dev, void *res) 2024 { 2025 struct input_devres *devres = res; 2026 struct input_dev *input = devres->input; 2027 2028 dev_dbg(dev, "%s: dropping reference to %s\n", 2029 __func__, dev_name(&input->dev)); 2030 input_put_device(input); 2031 } 2032 2033 /** 2034 * devm_input_allocate_device - allocate managed input device 2035 * @dev: device owning the input device being created 2036 * 2037 * Returns prepared struct input_dev or %NULL. 2038 * 2039 * Managed input devices do not need to be explicitly unregistered or 2040 * freed as it will be done automatically when owner device unbinds from 2041 * its driver (or binding fails). Once managed input device is allocated, 2042 * it is ready to be set up and registered in the same fashion as regular 2043 * input device. There are no special devm_input_device_[un]register() 2044 * variants, regular ones work with both managed and unmanaged devices, 2045 * should you need them. In most cases however, managed input device need 2046 * not be explicitly unregistered or freed. 2047 * 2048 * NOTE: the owner device is set up as parent of input device and users 2049 * should not override it. 2050 */ 2051 struct input_dev *devm_input_allocate_device(struct device *dev) 2052 { 2053 struct input_dev *input; 2054 struct input_devres *devres; 2055 2056 devres = devres_alloc(devm_input_device_release, 2057 sizeof(*devres), GFP_KERNEL); 2058 if (!devres) 2059 return NULL; 2060 2061 input = input_allocate_device(); 2062 if (!input) { 2063 devres_free(devres); 2064 return NULL; 2065 } 2066 2067 input->dev.parent = dev; 2068 input->devres_managed = true; 2069 2070 devres->input = input; 2071 devres_add(dev, devres); 2072 2073 return input; 2074 } 2075 EXPORT_SYMBOL(devm_input_allocate_device); 2076 2077 /** 2078 * input_free_device - free memory occupied by input_dev structure 2079 * @dev: input device to free 2080 * 2081 * This function should only be used if input_register_device() 2082 * was not called yet or if it failed. Once device was registered 2083 * use input_unregister_device() and memory will be freed once last 2084 * reference to the device is dropped. 2085 * 2086 * Device should be allocated by input_allocate_device(). 2087 * 2088 * NOTE: If there are references to the input device then memory 2089 * will not be freed until last reference is dropped. 2090 */ 2091 void input_free_device(struct input_dev *dev) 2092 { 2093 if (dev) { 2094 if (dev->devres_managed) 2095 WARN_ON(devres_destroy(dev->dev.parent, 2096 devm_input_device_release, 2097 devm_input_device_match, 2098 dev)); 2099 input_put_device(dev); 2100 } 2101 } 2102 EXPORT_SYMBOL(input_free_device); 2103 2104 /** 2105 * input_set_timestamp - set timestamp for input events 2106 * @dev: input device to set timestamp for 2107 * @timestamp: the time at which the event has occurred 2108 * in CLOCK_MONOTONIC 2109 * 2110 * This function is intended to provide to the input system a more 2111 * accurate time of when an event actually occurred. 

/**
 * input_set_timestamp - set timestamp for input events
 * @dev: input device to set timestamp for
 * @timestamp: the time at which the event has occurred, in CLOCK_MONOTONIC
 *
 * This function is intended to provide the input system with a more
 * accurate time at which an event actually occurred. The driver should
 * call this function as soon as a timestamp is acquired, so that the
 * clock conversions done in input_set_timestamp() are correct.
 *
 * If the system enters a suspend state between timestamp acquisition
 * and the call to input_set_timestamp(), the conversions may be
 * inaccurate.
 */
void input_set_timestamp(struct input_dev *dev, ktime_t timestamp)
{
	dev->timestamp[INPUT_CLK_MONO] = timestamp;
	dev->timestamp[INPUT_CLK_REAL] = ktime_mono_to_real(timestamp);
	dev->timestamp[INPUT_CLK_BOOT] = ktime_mono_to_any(timestamp,
							   TK_OFFS_BOOT);
}
EXPORT_SYMBOL(input_set_timestamp);

/**
 * input_get_timestamp - get timestamp for input events
 * @dev: input device to get timestamp from
 *
 * A valid timestamp has a non-zero value.
 */
ktime_t *input_get_timestamp(struct input_dev *dev)
{
	const ktime_t invalid_timestamp = ktime_set(0, 0);

	if (!ktime_compare(dev->timestamp[INPUT_CLK_MONO], invalid_timestamp))
		input_set_timestamp(dev, ktime_get());

	return dev->timestamp;
}
EXPORT_SYMBOL(input_get_timestamp);

/**
 * input_set_capability - mark device as capable of a certain event
 * @dev: device that is capable of emitting or accepting event
 * @type: type of the event (EV_KEY, EV_REL, etc...)
 * @code: event code
 *
 * In addition to setting the corresponding bit in the appropriate
 * capability bitmap, the function also adjusts dev->evbit.
 */
void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
{
	if (type < EV_CNT && input_max_code[type] &&
	    code > input_max_code[type]) {
		pr_err("%s: invalid code %u for type %u\n", __func__, code,
		       type);
		dump_stack();
		return;
	}

	switch (type) {
	case EV_KEY:
		__set_bit(code, dev->keybit);
		break;

	case EV_REL:
		__set_bit(code, dev->relbit);
		break;

	case EV_ABS:
		input_alloc_absinfo(dev);
		__set_bit(code, dev->absbit);
		break;

	case EV_MSC:
		__set_bit(code, dev->mscbit);
		break;

	case EV_SW:
		__set_bit(code, dev->swbit);
		break;

	case EV_LED:
		__set_bit(code, dev->ledbit);
		break;

	case EV_SND:
		__set_bit(code, dev->sndbit);
		break;

	case EV_FF:
		__set_bit(code, dev->ffbit);
		break;

	case EV_PWR:
		/* do nothing */
		break;

	default:
		pr_err("%s: unknown type %u (code %u)\n", __func__, type, code);
		dump_stack();
		return;
	}

	__set_bit(type, dev->evbit);
}
EXPORT_SYMBOL(input_set_capability);
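
/*
 * Illustrative sketch (assumption, not part of the core): declaring the
 * capabilities of a hypothetical one-button rotary knob before
 * registration might look roughly like this:
 *
 *	input_set_capability(input, EV_KEY, BTN_0);	// button press
 *	input_set_capability(input, EV_REL, REL_DIAL);	// rotation
 *
 * Each call sets the code's bit in the matching bitmap (keybit, relbit,
 * ...) and also sets the EV_KEY/EV_REL bit in dev->evbit, so the driver
 * does not have to touch the bitmaps directly.
 */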

static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
{
	int mt_slots;
	int i;
	unsigned int events;

	if (dev->mt) {
		mt_slots = dev->mt->num_slots;
	} else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
		mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
			   dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
		mt_slots = clamp(mt_slots, 2, 32);
	} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
		mt_slots = 2;
	} else {
		mt_slots = 0;
	}

	events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */

	if (test_bit(EV_ABS, dev->evbit))
		for_each_set_bit(i, dev->absbit, ABS_CNT)
			events += input_is_mt_axis(i) ? mt_slots : 1;

	if (test_bit(EV_REL, dev->evbit))
		events += bitmap_weight(dev->relbit, REL_CNT);

	/* Make room for KEY and MSC events */
	events += 7;

	return events;
}

#define INPUT_CLEANSE_BITMASK(dev, type, bits)				\
	do {								\
		if (!test_bit(EV_##type, dev->evbit))			\
			memset(dev->bits##bit, 0,			\
				sizeof(dev->bits##bit));		\
	} while (0)

static void input_cleanse_bitmasks(struct input_dev *dev)
{
	INPUT_CLEANSE_BITMASK(dev, KEY, key);
	INPUT_CLEANSE_BITMASK(dev, REL, rel);
	INPUT_CLEANSE_BITMASK(dev, ABS, abs);
	INPUT_CLEANSE_BITMASK(dev, MSC, msc);
	INPUT_CLEANSE_BITMASK(dev, LED, led);
	INPUT_CLEANSE_BITMASK(dev, SND, snd);
	INPUT_CLEANSE_BITMASK(dev, FF, ff);
	INPUT_CLEANSE_BITMASK(dev, SW, sw);
}

static void __input_unregister_device(struct input_dev *dev)
{
	struct input_handle *handle, *next;

	input_disconnect_device(dev);

	mutex_lock(&input_mutex);

	list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
		handle->handler->disconnect(handle);
	WARN_ON(!list_empty(&dev->h_list));

	del_timer_sync(&dev->timer);
	list_del_init(&dev->node);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);

	device_del(&dev->dev);
}

static void devm_input_device_unregister(struct device *dev, void *res)
{
	struct input_devres *devres = res;
	struct input_dev *input = devres->input;

	dev_dbg(dev, "%s: unregistering device %s\n",
		__func__, dev_name(&input->dev));
	__input_unregister_device(input);
}

/*
 * Generate software autorepeat event. Note that we take
 * dev->event_lock here to avoid racing with input_event()
 * which may cause keys to get "stuck".
 */
static void input_repeat_key(struct timer_list *t)
{
	struct input_dev *dev = from_timer(dev, t, timer);
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	if (!dev->inhibited &&
	    test_bit(dev->repeat_key, dev->key) &&
	    is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {

		input_set_timestamp(dev, ktime_get());
		input_handle_event(dev, EV_KEY, dev->repeat_key, 2);
		input_handle_event(dev, EV_SYN, SYN_REPORT, 1);

		if (dev->rep[REP_PERIOD])
			mod_timer(&dev->timer, jiffies +
				  msecs_to_jiffies(dev->rep[REP_PERIOD]));
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

/**
 * input_enable_softrepeat - enable software autorepeat
 * @dev: input device
 * @delay: repeat delay
 * @period: repeat period
 *
 * Enable software autorepeat on the input device.
 */
void input_enable_softrepeat(struct input_dev *dev, int delay, int period)
{
	dev->timer.function = input_repeat_key;
	dev->rep[REP_DELAY] = delay;
	dev->rep[REP_PERIOD] = period;
}
EXPORT_SYMBOL(input_enable_softrepeat);

bool input_device_enabled(struct input_dev *dev)
{
	lockdep_assert_held(&dev->mutex);

	return !dev->inhibited && dev->users > 0;
}
EXPORT_SYMBOL_GPL(input_device_enabled);
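
/*
 * Illustrative sketch (assumption, not part of the core): a driver that
 * wants non-default timing for the core's software autorepeat can set it
 * up with input_enable_softrepeat() above before registration, while a
 * driver whose hardware autorepeats pre-sets dev->rep[] so that
 * input_register_device() skips the software timer:
 *
 *	// software autorepeat: 400 ms delay, repeat every 50 ms
 *	input_enable_softrepeat(input, 400, 50);
 *
 *	// hardware autorepeat: advertise EV_REP but pre-set the values
 *	// so the core leaves repeating to the driver
 *	__set_bit(EV_REP, input->evbit);
 *	input->rep[REP_DELAY] = 250;
 *	input->rep[REP_PERIOD] = 33;
 */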

/**
 * input_register_device - register device with input core
 * @dev: device to be registered
 *
 * This function registers the device with the input core. The device
 * must be allocated with input_allocate_device() and all of its
 * capabilities set up before registering.
 * If this function fails, the device must be freed with
 * input_free_device(). Once a device has been successfully registered
 * it can be unregistered with input_unregister_device();
 * input_free_device() should not be called in this case.
 *
 * Note that this function is also used to register managed input devices
 * (ones allocated with devm_input_allocate_device()). Such managed input
 * devices need not be explicitly unregistered or freed; their teardown
 * is controlled by the devres infrastructure. It is also worth noting
 * that teardown of managed input devices is internally a two-step
 * process: the registered managed input device is first unregistered,
 * but stays in memory and can still handle input_event() calls (although
 * events will not be delivered anywhere). The freeing of the managed
 * input device happens later, when the devres stack is unwound to the
 * point where the device allocation was made.
 */
int input_register_device(struct input_dev *dev)
{
	struct input_devres *devres = NULL;
	struct input_handler *handler;
	unsigned int packet_size;
	const char *path;
	int error;

	if (test_bit(EV_ABS, dev->evbit) && !dev->absinfo) {
		dev_err(&dev->dev,
			"Absolute device without dev->absinfo, refusing to register\n");
		return -EINVAL;
	}

	if (dev->devres_managed) {
		devres = devres_alloc(devm_input_device_unregister,
				      sizeof(*devres), GFP_KERNEL);
		if (!devres)
			return -ENOMEM;

		devres->input = dev;
	}

	/* Every input device generates EV_SYN/SYN_REPORT events. */
	__set_bit(EV_SYN, dev->evbit);

	/* KEY_RESERVED is not supposed to be transmitted to userspace. */
	__clear_bit(KEY_RESERVED, dev->keybit);

	/* Make sure that bitmasks not mentioned in dev->evbit are clean. */
	input_cleanse_bitmasks(dev);

	packet_size = input_estimate_events_per_packet(dev);
	if (dev->hint_events_per_packet < packet_size)
		dev->hint_events_per_packet = packet_size;

	dev->max_vals = dev->hint_events_per_packet + 2;
	dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL);
	if (!dev->vals) {
		error = -ENOMEM;
		goto err_devres_free;
	}

	/*
	 * If delay and period are pre-set by the driver, then autorepeating
	 * is handled by the driver itself and we don't do it in input.c.
	 */
	if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD])
		input_enable_softrepeat(dev, 250, 33);

	if (!dev->getkeycode)
		dev->getkeycode = input_default_getkeycode;

	if (!dev->setkeycode)
		dev->setkeycode = input_default_setkeycode;

	if (dev->poller)
		input_dev_poller_finalize(dev->poller);

	error = device_add(&dev->dev);
	if (error)
		goto err_free_vals;

	path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
	pr_info("%s as %s\n",
		dev->name ? dev->name : "Unspecified device",
		path ? path : "N/A");
	kfree(path);

	error = mutex_lock_interruptible(&input_mutex);
	if (error)
		goto err_device_del;

	list_add_tail(&dev->node, &input_dev_list);

	list_for_each_entry(handler, &input_handler_list, node)
		input_attach_handler(dev, handler);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);

	if (dev->devres_managed) {
		dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n",
			__func__, dev_name(&dev->dev));
		devres_add(dev->dev.parent, devres);
	}
	return 0;

err_device_del:
	device_del(&dev->dev);
err_free_vals:
	kfree(dev->vals);
	dev->vals = NULL;
err_devres_free:
	devres_free(devres);
	return error;
}
EXPORT_SYMBOL(input_register_device);
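
/*
 * Illustrative sketch (assumption, not part of the core): a typical
 * registration flow for an unmanaged device in a hypothetical driver.
 * The error handling follows the rules documented above: free with
 * input_free_device() only while the device is unregistered.
 *
 *	struct input_dev *input;
 *	int error;
 *
 *	input = input_allocate_device();
 *	if (!input)
 *		return -ENOMEM;
 *
 *	input->name = "Example Button";
 *	input_set_capability(input, EV_KEY, KEY_POWER);
 *
 *	error = input_register_device(input);
 *	if (error) {
 *		input_free_device(input);	// not yet registered
 *		return error;
 *	}
 *
 *	// ...later, on removal:
 *	input_unregister_device(input);		// no input_free_device() here
 */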

/**
 * input_unregister_device - unregister previously registered device
 * @dev: device to be unregistered
 *
 * This function unregisters an input device. Once the device has been
 * unregistered the caller should not try to access it, as it may be
 * freed at any moment.
 */
void input_unregister_device(struct input_dev *dev)
{
	if (dev->devres_managed) {
		WARN_ON(devres_destroy(dev->dev.parent,
					devm_input_device_unregister,
					devm_input_device_match,
					dev));
		__input_unregister_device(dev);
		/*
		 * We do not do input_put_device() here because it will be
		 * done when the second devres entry fires.
		 */
	} else {
		__input_unregister_device(dev);
		input_put_device(dev);
	}
}
EXPORT_SYMBOL(input_unregister_device);

static int input_handler_check_methods(const struct input_handler *handler)
{
	int count = 0;

	if (handler->filter)
		count++;
	if (handler->events)
		count++;
	if (handler->event)
		count++;

	if (count > 1) {
		pr_err("%s: only one event processing method can be defined (%s)\n",
		       __func__, handler->name);
		return -EINVAL;
	}

	return 0;
}

/*
 * An implementation of input_handler's events() method that simply
 * invokes handler->event() method for each event one by one.
 */
static unsigned int input_handler_events_default(struct input_handle *handle,
						 struct input_value *vals,
						 unsigned int count)
{
	struct input_handler *handler = handle->handler;
	struct input_value *v;

	for (v = vals; v != vals + count; v++)
		handler->event(handle, v->type, v->code, v->value);

	return count;
}

/*
 * An implementation of input_handler's events() method that invokes
 * handler->filter() method for each event one by one and removes events
 * that were filtered out from the "vals" array.
 */
static unsigned int input_handler_events_filter(struct input_handle *handle,
						struct input_value *vals,
						unsigned int count)
{
	struct input_handler *handler = handle->handler;
	struct input_value *end = vals;
	struct input_value *v;

	for (v = vals; v != vals + count; v++) {
		if (handler->filter(handle, v->type, v->code, v->value))
			continue;
		if (end != v)
			*end = *v;
		end++;
	}

	return end - vals;
}
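
/*
 * Illustrative sketch (assumption, not part of the core): a handler that
 * defines only .filter gets its events() method set to
 * input_handler_events_filter() above. Returning true from the filter
 * drops the event before it reaches regular handlers:
 *
 *	static bool example_filter(struct input_handle *handle,
 *				   unsigned int type, unsigned int code,
 *				   int value)
 *	{
 *		// swallow volume keys, pass everything else through
 *		return type == EV_KEY &&
 *		       (code == KEY_VOLUMEUP || code == KEY_VOLUMEDOWN);
 *	}
 */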

/*
 * An implementation of input_handler's events() method that does nothing.
 */
static unsigned int input_handler_events_null(struct input_handle *handle,
					      struct input_value *vals,
					      unsigned int count)
{
	return count;
}

/**
 * input_register_handler - register a new input handler
 * @handler: handler to be registered
 *
 * This function registers a new input handler (interface) for input
 * devices in the system and attaches it to all input devices that
 * are compatible with the handler.
 */
int input_register_handler(struct input_handler *handler)
{
	struct input_dev *dev;
	int error;

	error = input_handler_check_methods(handler);
	if (error)
		return error;

	INIT_LIST_HEAD(&handler->h_list);

	if (handler->filter)
		handler->events = input_handler_events_filter;
	else if (handler->event)
		handler->events = input_handler_events_default;
	else if (!handler->events)
		handler->events = input_handler_events_null;

	error = mutex_lock_interruptible(&input_mutex);
	if (error)
		return error;

	list_add_tail(&handler->node, &input_handler_list);

	list_for_each_entry(dev, &input_dev_list, node)
		input_attach_handler(dev, handler);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);
	return 0;
}
EXPORT_SYMBOL(input_register_handler);

/**
 * input_unregister_handler - unregisters an input handler
 * @handler: handler to be unregistered
 *
 * This function disconnects a handler from its input devices and
 * removes it from the list of known handlers.
 */
void input_unregister_handler(struct input_handler *handler)
{
	struct input_handle *handle, *next;

	mutex_lock(&input_mutex);

	list_for_each_entry_safe(handle, next, &handler->h_list, h_node)
		handler->disconnect(handle);
	WARN_ON(!list_empty(&handler->h_list));

	list_del_init(&handler->node);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);
}
EXPORT_SYMBOL(input_unregister_handler);

/**
 * input_handler_for_each_handle - handle iterator
 * @handler: input handler to iterate
 * @data: data for the callback
 * @fn: function to be called for each handle
 *
 * Iterate over @handler's list of handles, calling @fn for each and
 * passing it @data, and stop when @fn returns a non-zero value. The
 * function uses RCU to traverse the list and therefore may be used in
 * atomic contexts. The @fn callback is invoked from an RCU critical
 * section and thus must not sleep.
 */
int input_handler_for_each_handle(struct input_handler *handler, void *data,
				  int (*fn)(struct input_handle *, void *))
{
	struct input_handle *handle;
	int retval = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(handle, &handler->h_list, h_node) {
		retval = fn(handle, data);
		if (retval)
			break;
	}

	rcu_read_unlock();

	return retval;
}
EXPORT_SYMBOL(input_handler_for_each_handle);
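
/*
 * Illustrative sketch (assumption, not part of the core): the skeleton of
 * a hypothetical handler's connect() method, which is expected to
 * allocate a handle, register it with input_register_handle() below and
 * usually open the device; disconnect() undoes these steps in reverse.
 *
 *	static int example_connect(struct input_handler *handler,
 *				   struct input_dev *dev,
 *				   const struct input_device_id *id)
 *	{
 *		struct input_handle *handle;
 *		int error;
 *
 *		handle = kzalloc(sizeof(*handle), GFP_KERNEL);
 *		if (!handle)
 *			return -ENOMEM;
 *
 *		handle->dev = dev;
 *		handle->handler = handler;
 *		handle->name = "example";
 *
 *		error = input_register_handle(handle);
 *		if (error)
 *			goto err_free;
 *
 *		error = input_open_device(handle);
 *		if (error)
 *			goto err_unregister;
 *
 *		return 0;
 *
 *	err_unregister:
 *		input_unregister_handle(handle);
 *	err_free:
 *		kfree(handle);
 *		return error;
 *	}
 */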

/**
 * input_register_handle - register a new input handle
 * @handle: handle to register
 *
 * This function puts a new input handle onto the device's
 * and handler's lists so that events can flow through
 * it once it is opened using input_open_device().
 *
 * This function is supposed to be called from the handler's
 * connect() method.
 */
int input_register_handle(struct input_handle *handle)
{
	struct input_handler *handler = handle->handler;
	struct input_dev *dev = handle->dev;
	int error;

	/*
	 * We take dev->mutex here to prevent race with
	 * input_release_device().
	 */
	error = mutex_lock_interruptible(&dev->mutex);
	if (error)
		return error;

	/*
	 * Filters go to the head of the list, normal handlers
	 * to the tail.
	 */
	if (handler->filter)
		list_add_rcu(&handle->d_node, &dev->h_list);
	else
		list_add_tail_rcu(&handle->d_node, &dev->h_list);

	mutex_unlock(&dev->mutex);

	/*
	 * Since we are supposed to be called from ->connect()
	 * which is mutually exclusive with ->disconnect()
	 * we can't be racing with input_unregister_handle()
	 * and so separate lock is not needed here.
	 */
	list_add_tail_rcu(&handle->h_node, &handler->h_list);

	if (handler->start)
		handler->start(handle);

	return 0;
}
EXPORT_SYMBOL(input_register_handle);

/**
 * input_unregister_handle - unregister an input handle
 * @handle: handle to unregister
 *
 * This function removes the input handle from the device's
 * and handler's lists.
 *
 * This function is supposed to be called from the handler's
 * disconnect() method.
 */
void input_unregister_handle(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	list_del_rcu(&handle->h_node);

	/*
	 * Take dev->mutex to prevent race with input_release_device().
	 */
	mutex_lock(&dev->mutex);
	list_del_rcu(&handle->d_node);
	mutex_unlock(&dev->mutex);

	synchronize_rcu();
}
EXPORT_SYMBOL(input_unregister_handle);

/**
 * input_get_new_minor - allocates a new input minor number
 * @legacy_base: beginning of the legacy range to be searched
 * @legacy_num: size of the legacy range
 * @allow_dynamic: whether we can also take an ID from the dynamic range
 *
 * This function allocates a new device minor from the input major's
 * namespace. The caller can request a legacy minor by specifying the
 * @legacy_base and @legacy_num parameters, and whether an ID may be
 * allocated from the dynamic range if there are no free IDs in the
 * legacy range.
 */
int input_get_new_minor(int legacy_base, unsigned int legacy_num,
			bool allow_dynamic)
{
	/*
	 * This function should be called from input handler's ->connect()
	 * methods, which are serialized with input_mutex, so no additional
	 * locking is needed here.
	 */
	if (legacy_base >= 0) {
		int minor = ida_alloc_range(&input_ida, legacy_base,
					    legacy_base + legacy_num - 1,
					    GFP_KERNEL);
		if (minor >= 0 || !allow_dynamic)
			return minor;
	}

	return ida_alloc_range(&input_ida, INPUT_FIRST_DYNAMIC_DEV,
			       INPUT_MAX_CHAR_DEVICES - 1, GFP_KERNEL);
}
EXPORT_SYMBOL(input_get_new_minor);
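
/*
 * Illustrative sketch (assumption, not part of the core): a handler that
 * owns a block of legacy minors can ask for one of those first and fall
 * back to the dynamic range; the constants below are hypothetical.
 *
 *	#define EXAMPLE_MINOR_BASE	64
 *	#define EXAMPLE_MINORS		32
 *
 *	int minor = input_get_new_minor(EXAMPLE_MINOR_BASE, EXAMPLE_MINORS,
 *					true);
 *	if (minor < 0)
 *		return minor;	// no free minor available
 *
 *	// ...and on teardown:
 *	input_free_minor(minor);
 */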

/**
 * input_free_minor - release previously allocated minor
 * @minor: minor to be released
 *
 * This function releases a previously allocated input minor so that
 * it can be reused later.
 */
void input_free_minor(unsigned int minor)
{
	ida_free(&input_ida, minor);
}
EXPORT_SYMBOL(input_free_minor);

static int __init input_init(void)
{
	int err;

	err = class_register(&input_class);
	if (err) {
		pr_err("unable to register input_dev class\n");
		return err;
	}

	err = input_proc_init();
	if (err)
		goto fail1;

	err = register_chrdev_region(MKDEV(INPUT_MAJOR, 0),
				     INPUT_MAX_CHAR_DEVICES, "input");
	if (err) {
		pr_err("unable to register char major %d\n", INPUT_MAJOR);
		goto fail2;
	}

	return 0;

 fail2:	input_proc_exit();
 fail1:	class_unregister(&input_class);
	return err;
}

static void __exit input_exit(void)
{
	input_proc_exit();
	unregister_chrdev_region(MKDEV(INPUT_MAJOR, 0),
				 INPUT_MAX_CHAR_DEVICES);
	class_unregister(&input_class);
}

subsys_initcall(input_init);
module_exit(input_exit);