// SPDX-License-Identifier: GPL-2.0-only
/*
 * The input core
 *
 * Copyright (c) 1999-2002 Vojtech Pavlik
 */


#define pr_fmt(fmt) KBUILD_BASENAME ": " fmt

#include <linux/init.h>
#include <linux/types.h>
#include <linux/idr.h>
#include <linux/input/mt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/major.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/pm.h>
#include <linux/poll.h>
#include <linux/device.h>
#include <linux/kstrtox.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include "input-compat.h"
#include "input-core-private.h"
#include "input-poller.h"

MODULE_AUTHOR("Vojtech Pavlik <[email protected]>");
MODULE_DESCRIPTION("Input core");
MODULE_LICENSE("GPL");

#define INPUT_MAX_CHAR_DEVICES		1024
#define INPUT_FIRST_DYNAMIC_DEV		256
static DEFINE_IDA(input_ida);

static LIST_HEAD(input_dev_list);
static LIST_HEAD(input_handler_list);

/*
 * input_mutex protects access to both input_dev_list and input_handler_list.
 * This also causes input_[un]register_device and input_[un]register_handler
 * to be mutually exclusive, which simplifies locking in drivers implementing
 * input handlers.
 */
static DEFINE_MUTEX(input_mutex);

static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };

static const unsigned int input_max_code[EV_CNT] = {
	[EV_KEY] = KEY_MAX,
	[EV_REL] = REL_MAX,
	[EV_ABS] = ABS_MAX,
	[EV_MSC] = MSC_MAX,
	[EV_SW] = SW_MAX,
	[EV_LED] = LED_MAX,
	[EV_SND] = SND_MAX,
	[EV_FF] = FF_MAX,
};

static inline int is_event_supported(unsigned int code,
				     unsigned long *bm, unsigned int max)
{
	return code <= max && test_bit(code, bm);
}

static int input_defuzz_abs_event(int value, int old_val, int fuzz)
{
	if (fuzz) {
		if (value > old_val - fuzz / 2 && value < old_val + fuzz / 2)
			return old_val;

		if (value > old_val - fuzz && value < old_val + fuzz)
			return (old_val * 3 + value) / 4;

		if (value > old_val - fuzz * 2 && value < old_val + fuzz * 2)
			return (old_val + value) / 2;
	}

	return value;
}

static void input_start_autorepeat(struct input_dev *dev, int code)
{
	if (test_bit(EV_REP, dev->evbit) &&
	    dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] &&
	    dev->timer.function) {
		dev->repeat_key = code;
		mod_timer(&dev->timer,
			  jiffies + msecs_to_jiffies(dev->rep[REP_DELAY]));
	}
}

static void input_stop_autorepeat(struct input_dev *dev)
{
	del_timer(&dev->timer);
}
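
/*
 * Illustrative sketch (not part of the input core): a driver that wants the
 * software autorepeat driven by input_start_autorepeat() above only has to
 * advertise EV_REP; the repeat timing can be tuned with
 * input_enable_softrepeat() from <linux/input.h>. The delay/period values
 * below are arbitrary example numbers.
 */
static void __maybe_unused example_use_soft_autorepeat(struct input_dev *dev)
{
	__set_bit(EV_REP, dev->evbit);
	/* 250 ms before the first repeat, then ~30 repeats per second */
	input_enable_softrepeat(dev, 250, 33);
}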

/*
 * Pass values first through all filters and then, if event has not been
 * filtered out, through all open handles. This order is achieved by placing
 * filters at the head of the list of handles attached to the device, and
 * placing regular handles at the tail of the list.
 *
 * This function is called with dev->event_lock held and interrupts disabled.
 */
static void input_pass_values(struct input_dev *dev,
			      struct input_value *vals, unsigned int count)
{
	struct input_handle *handle;
	struct input_value *v;

	lockdep_assert_held(&dev->event_lock);

	rcu_read_lock();

	handle = rcu_dereference(dev->grab);
	if (handle) {
		count = handle->handler->events(handle, vals, count);
	} else {
		list_for_each_entry_rcu(handle, &dev->h_list, d_node)
			if (handle->open) {
				count = handle->handler->events(handle, vals,
								count);
				if (!count)
					break;
			}
	}

	rcu_read_unlock();

	/* trigger auto repeat for key events */
	if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) {
		for (v = vals; v != vals + count; v++) {
			if (v->type == EV_KEY && v->value != 2) {
				if (v->value)
					input_start_autorepeat(dev, v->code);
				else
					input_stop_autorepeat(dev);
			}
		}
	}
}
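
/*
 * Illustrative sketch (not part of the input core): the simplest
 * handler->events() callback compatible with the way input_pass_values()
 * above invokes it. A filtering handler would instead compact @vals,
 * dropping the values it consumed, and return how many values remain for
 * the handles further down the list.
 */
static unsigned int __maybe_unused
example_passthrough_events(struct input_handle *handle,
			   struct input_value *vals, unsigned int count)
{
	/* Do nothing with the batch and let every value through. */
	return count;
}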

#define INPUT_IGNORE_EVENT	0
#define INPUT_PASS_TO_HANDLERS	1
#define INPUT_PASS_TO_DEVICE	2
#define INPUT_SLOT		4
#define INPUT_FLUSH		8
#define INPUT_PASS_TO_ALL	(INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)

static int input_handle_abs_event(struct input_dev *dev,
				  unsigned int code, int *pval)
{
	struct input_mt *mt = dev->mt;
	bool is_new_slot = false;
	bool is_mt_event;
	int *pold;

	if (code == ABS_MT_SLOT) {
		/*
		 * "Stage" the event; we'll flush it later, when we
		 * get actual touch data.
		 */
		if (mt && *pval >= 0 && *pval < mt->num_slots)
			mt->slot = *pval;

		return INPUT_IGNORE_EVENT;
	}

	is_mt_event = input_is_mt_value(code);

	if (!is_mt_event) {
		pold = &dev->absinfo[code].value;
	} else if (mt) {
		pold = &mt->slots[mt->slot].abs[code - ABS_MT_FIRST];
		is_new_slot = mt->slot != dev->absinfo[ABS_MT_SLOT].value;
	} else {
		/*
		 * Bypass filtering for multi-touch events when
		 * not employing slots.
		 */
		pold = NULL;
	}

	if (pold) {
		*pval = input_defuzz_abs_event(*pval, *pold,
						dev->absinfo[code].fuzz);
		if (*pold == *pval)
			return INPUT_IGNORE_EVENT;

		*pold = *pval;
	}

	/* Flush pending "slot" event */
	if (is_new_slot) {
		dev->absinfo[ABS_MT_SLOT].value = mt->slot;
		return INPUT_PASS_TO_HANDLERS | INPUT_SLOT;
	}

	return INPUT_PASS_TO_HANDLERS;
}

static int input_get_disposition(struct input_dev *dev,
				 unsigned int type, unsigned int code, int *pval)
{
	int disposition = INPUT_IGNORE_EVENT;
	int value = *pval;

	/* filter out events from inhibited devices */
	if (dev->inhibited)
		return INPUT_IGNORE_EVENT;

	switch (type) {

	case EV_SYN:
		switch (code) {
		case SYN_CONFIG:
			disposition = INPUT_PASS_TO_ALL;
			break;

		case SYN_REPORT:
			disposition = INPUT_PASS_TO_HANDLERS | INPUT_FLUSH;
			break;

		case SYN_MT_REPORT:
			disposition = INPUT_PASS_TO_HANDLERS;
			break;
		}
		break;

	case EV_KEY:
		if (is_event_supported(code, dev->keybit, KEY_MAX)) {

			/* auto-repeat bypasses state updates */
			if (value == 2) {
				disposition = INPUT_PASS_TO_HANDLERS;
				break;
			}

			if (!!test_bit(code, dev->key) != !!value) {

				__change_bit(code, dev->key);
				disposition = INPUT_PASS_TO_HANDLERS;
			}
		}
		break;

	case EV_SW:
		if (is_event_supported(code, dev->swbit, SW_MAX) &&
		    !!test_bit(code, dev->sw) != !!value) {

			__change_bit(code, dev->sw);
			disposition = INPUT_PASS_TO_HANDLERS;
		}
		break;

	case EV_ABS:
		if (is_event_supported(code, dev->absbit, ABS_MAX))
			disposition = input_handle_abs_event(dev, code, &value);

		break;

	case EV_REL:
		if (is_event_supported(code, dev->relbit, REL_MAX) && value)
			disposition = INPUT_PASS_TO_HANDLERS;

		break;

	case EV_MSC:
		if (is_event_supported(code, dev->mscbit, MSC_MAX))
			disposition = INPUT_PASS_TO_ALL;

		break;

	case EV_LED:
		if (is_event_supported(code, dev->ledbit, LED_MAX) &&
		    !!test_bit(code, dev->led) != !!value) {

			__change_bit(code, dev->led);
			disposition = INPUT_PASS_TO_ALL;
		}
		break;

	case EV_SND:
		if (is_event_supported(code, dev->sndbit, SND_MAX)) {

			if (!!test_bit(code, dev->snd) != !!value)
				__change_bit(code, dev->snd);
			disposition = INPUT_PASS_TO_ALL;
		}
		break;

	case EV_REP:
		if (code <= REP_MAX && value >= 0 && dev->rep[code] != value) {
			dev->rep[code] = value;
			disposition = INPUT_PASS_TO_ALL;
		}
		break;

	case EV_FF:
		if (value >= 0)
			disposition = INPUT_PASS_TO_ALL;
		break;

	case EV_PWR:
		disposition = INPUT_PASS_TO_ALL;
		break;
	}

	*pval = value;
	return disposition;
}

static void input_event_dispose(struct input_dev *dev, int disposition,
				unsigned int type, unsigned int code, int value)
{
	if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
		dev->event(dev, type, code, value);

	if (disposition & INPUT_PASS_TO_HANDLERS) {
		struct input_value *v;

		if (disposition & INPUT_SLOT) {
			v = &dev->vals[dev->num_vals++];
			v->type = EV_ABS;
			v->code = ABS_MT_SLOT;
			v->value = dev->mt->slot;
		}

		v = &dev->vals[dev->num_vals++];
		v->type = type;
		v->code = code;
		v->value = value;
	}

	if (disposition & INPUT_FLUSH) {
		if (dev->num_vals >= 2)
			input_pass_values(dev, dev->vals, dev->num_vals);
		dev->num_vals = 0;
		/*
		 * Reset the timestamp on flush so we won't end up
		 * with a stale one. Note we only need to reset the
		 * monotonic one as we use its presence when deciding
		 * whether to generate a synthetic timestamp.
		 */
		dev->timestamp[INPUT_CLK_MONO] = ktime_set(0, 0);
	} else if (dev->num_vals >= dev->max_vals - 2) {
		dev->vals[dev->num_vals++] = input_value_sync;
		input_pass_values(dev, dev->vals, dev->num_vals);
		dev->num_vals = 0;
	}
}

void input_handle_event(struct input_dev *dev,
			unsigned int type, unsigned int code, int value)
{
	int disposition;

	lockdep_assert_held(&dev->event_lock);

	disposition = input_get_disposition(dev, type, code, &value);
	if (disposition != INPUT_IGNORE_EVENT) {
		if (type != EV_SYN)
			add_input_randomness(type, code, value);

		input_event_dispose(dev, disposition, type, code, value);
	}
}

/**
 * input_event() - report new input event
 * @dev: device that generated the event
 * @type: type of the event
 * @code: event code
 * @value: value of the event
 *
 * This function should be used by drivers implementing various input
 * devices to report input events. See also input_inject_event().
 *
 * NOTE: input_event() may be safely used right after input device was
 * allocated with input_allocate_device(), even before it is registered
 * with input_register_device(), but the event will not reach any of the
 * input handlers. Such early invocation of input_event() may be used
 * to 'seed' initial state of a switch or initial position of absolute
 * axis, etc.
 */
void input_event(struct input_dev *dev,
		 unsigned int type, unsigned int code, int value)
{
	unsigned long flags;

	if (is_event_supported(type, dev->evbit, EV_MAX)) {

		spin_lock_irqsave(&dev->event_lock, flags);
		input_handle_event(dev, type, code, value);
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}
EXPORT_SYMBOL(input_event);

/**
 * input_inject_event() - send input event from input handler
 * @handle: input handle to send event through
 * @type: type of the event
 * @code: event code
 * @value: value of the event
 *
 * Similar to input_event() but will ignore event if device is
 * "grabbed" and handle injecting event is not the one that owns
 * the device.
 */
void input_inject_event(struct input_handle *handle,
			unsigned int type, unsigned int code, int value)
{
	struct input_dev *dev = handle->dev;
	struct input_handle *grab;
	unsigned long flags;

	if (is_event_supported(type, dev->evbit, EV_MAX)) {
		spin_lock_irqsave(&dev->event_lock, flags);

		rcu_read_lock();
		grab = rcu_dereference(dev->grab);
		if (!grab || grab == handle)
			input_handle_event(dev, type, code, value);
		rcu_read_unlock();

		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}
EXPORT_SYMBOL(input_inject_event);
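
/*
 * Illustrative sketch (not part of the input core): a device driver reports
 * state changes through input_event(), usually via the input_report_*()
 * wrappers from <linux/input.h>, and closes the packet with input_sync().
 * KEY_POWER is an arbitrary example code.
 */
static void __maybe_unused example_report_button(struct input_dev *dev,
						 bool pressed)
{
	input_report_key(dev, KEY_POWER, pressed);
	input_sync(dev);	/* emits EV_SYN/SYN_REPORT to flush handlers */
}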

/**
 * input_alloc_absinfo - allocates array of input_absinfo structs
 * @dev: the input device emitting absolute events
 *
 * If the absinfo struct the caller asked for is already allocated, this
 * function will not do anything.
 */
void input_alloc_absinfo(struct input_dev *dev)
{
	if (dev->absinfo)
		return;

	dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL);
	if (!dev->absinfo) {
		dev_err(dev->dev.parent ?: &dev->dev,
			"%s: unable to allocate memory\n", __func__);
		/*
		 * We will handle this allocation failure in
		 * input_register_device() when we refuse to register input
		 * device with ABS bits but without absinfo.
		 */
	}
}
EXPORT_SYMBOL(input_alloc_absinfo);

void input_set_abs_params(struct input_dev *dev, unsigned int axis,
			  int min, int max, int fuzz, int flat)
{
	struct input_absinfo *absinfo;

	__set_bit(EV_ABS, dev->evbit);
	__set_bit(axis, dev->absbit);

	input_alloc_absinfo(dev);
	if (!dev->absinfo)
		return;

	absinfo = &dev->absinfo[axis];
	absinfo->minimum = min;
	absinfo->maximum = max;
	absinfo->fuzz = fuzz;
	absinfo->flat = flat;
}
EXPORT_SYMBOL(input_set_abs_params);

/**
 * input_copy_abs - Copy absinfo from one input_dev to another
 * @dst: Destination input device to copy the abs settings to
 * @dst_axis: ABS_* value selecting the destination axis
 * @src: Source input device to copy the abs settings from
 * @src_axis: ABS_* value selecting the source axis
 *
 * Set absinfo for the selected destination axis by copying it from
 * the specified source input device's source axis.
 * This is useful to e.g. set up a pen/stylus input device for combined
 * touchscreen/pen hardware where the pen uses the same coordinates as
 * the touchscreen.
 */
void input_copy_abs(struct input_dev *dst, unsigned int dst_axis,
		    const struct input_dev *src, unsigned int src_axis)
{
	/* src must have EV_ABS and src_axis set */
	if (WARN_ON(!(test_bit(EV_ABS, src->evbit) &&
		      test_bit(src_axis, src->absbit))))
		return;

	/*
	 * input_alloc_absinfo() may have failed for the source. Our caller is
	 * expected to catch this when registering the input devices, which may
	 * happen after the input_copy_abs() call.
	 */
	if (!src->absinfo)
		return;

	input_set_capability(dst, EV_ABS, dst_axis);
	if (!dst->absinfo)
		return;

	dst->absinfo[dst_axis] = src->absinfo[src_axis];
}
EXPORT_SYMBOL(input_copy_abs);
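
/*
 * Illustrative sketch (not part of the input core): a touchscreen driver
 * would typically declare its axes with input_set_abs_params(). The
 * 0..4095 range and the fuzz values below are arbitrary example numbers.
 */
static void __maybe_unused example_setup_touchscreen_axes(struct input_dev *dev)
{
	input_set_abs_params(dev, ABS_X, 0, 4095, 4, 0);
	input_set_abs_params(dev, ABS_Y, 0, 4095, 4, 0);
	/* pressure axis without de-fuzzing */
	input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);
}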

/**
 * input_grab_device - grabs device for exclusive use
 * @handle: input handle that wants to own the device
 *
 * When a device is grabbed by an input handle all events generated by
 * the device are delivered only to this handle. Also events injected
 * by other input handles are ignored while device is grabbed.
 */
int input_grab_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->grab) {
		retval = -EBUSY;
		goto out;
	}

	rcu_assign_pointer(dev->grab, handle);

out:
	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_grab_device);

static void __input_release_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	struct input_handle *grabber;

	grabber = rcu_dereference_protected(dev->grab,
					    lockdep_is_held(&dev->mutex));
	if (grabber == handle) {
		rcu_assign_pointer(dev->grab, NULL);
		/* Make sure input_pass_values() notices that grab is gone */
		synchronize_rcu();

		list_for_each_entry(handle, &dev->h_list, d_node)
			if (handle->open && handle->handler->start)
				handle->handler->start(handle);
	}
}

/**
 * input_release_device - release previously grabbed device
 * @handle: input handle that owns the device
 *
 * Releases previously grabbed device so that other input handles can
 * start receiving input events. Upon release all handlers attached
 * to the device have their start() method called so they have a chance
 * to synchronize device state with the rest of the system.
 */
void input_release_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	mutex_lock(&dev->mutex);
	__input_release_device(handle);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_release_device);

/**
 * input_open_device - open input device
 * @handle: handle through which device is being accessed
 *
 * This function should be called by input handlers when they
 * want to start receiving events from the given input device.
 */
int input_open_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->going_away) {
		retval = -ENODEV;
		goto out;
	}

	handle->open++;

	if (handle->handler->passive_observer)
		goto out;

	if (dev->users++ || dev->inhibited) {
		/*
		 * Device is already opened and/or inhibited,
		 * so we can exit immediately and report success.
		 */
		goto out;
	}

	if (dev->open) {
		retval = dev->open(dev);
		if (retval) {
			dev->users--;
			handle->open--;
			/*
			 * Make sure we are not delivering any more events
			 * through this handle
			 */
			synchronize_rcu();
			goto out;
		}
	}

	if (dev->poller)
		input_dev_poller_start(dev->poller);

out:
	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_open_device);

int input_flush_device(struct input_handle *handle, struct file *file)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->flush)
		retval = dev->flush(dev, file);

	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_flush_device);
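
/*
 * Illustrative sketch (not part of the input core): an input handler can
 * claim a device exclusively, much like evdev does for EVIOCGRAB, by pairing
 * input_grab_device() with input_release_device().
 */
static int __maybe_unused example_with_exclusive_access(struct input_handle *handle)
{
	int error;

	error = input_grab_device(handle);
	if (error)
		return error;	/* -EBUSY if another handle holds the grab */

	/* ... events now reach only this handle ... */

	input_release_device(handle);
	return 0;
}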

/**
 * input_close_device - close input device
 * @handle: handle through which device is being accessed
 *
 * This function should be called by input handlers when they
 * want to stop receiving events from the given input device.
 */
void input_close_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	mutex_lock(&dev->mutex);

	__input_release_device(handle);

	if (!handle->handler->passive_observer) {
		if (!--dev->users && !dev->inhibited) {
			if (dev->poller)
				input_dev_poller_stop(dev->poller);
			if (dev->close)
				dev->close(dev);
		}
	}

	if (!--handle->open) {
		/*
		 * synchronize_rcu() makes sure that input_pass_values()
		 * completed and that no more input events are delivered
		 * through this handle
		 */
		synchronize_rcu();
	}

	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_close_device);

/*
 * Simulate keyup events for all keys that are marked as pressed.
 * The function must be called with dev->event_lock held.
 */
static bool input_dev_release_keys(struct input_dev *dev)
{
	bool need_sync = false;
	int code;

	lockdep_assert_held(&dev->event_lock);

	if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
		for_each_set_bit(code, dev->key, KEY_CNT) {
			input_handle_event(dev, EV_KEY, code, 0);
			need_sync = true;
		}
	}

	return need_sync;
}

/*
 * Prepare device for unregistering
 */
static void input_disconnect_device(struct input_dev *dev)
{
	struct input_handle *handle;

	/*
	 * Mark device as going away. Note that we take dev->mutex here
	 * not to protect access to dev->going_away but rather to ensure
	 * that there are no threads in the middle of input_open_device()
	 */
	mutex_lock(&dev->mutex);
	dev->going_away = true;
	mutex_unlock(&dev->mutex);

	spin_lock_irq(&dev->event_lock);

	/*
	 * Simulate keyup events for all pressed keys so that handlers
	 * are not left with "stuck" keys. The driver may continue to
	 * generate events even after we are done here but they will not
	 * reach any handlers.
	 */
	if (input_dev_release_keys(dev))
		input_handle_event(dev, EV_SYN, SYN_REPORT, 1);

	list_for_each_entry(handle, &dev->h_list, d_node)
		handle->open = 0;

	spin_unlock_irq(&dev->event_lock);
}

/**
 * input_scancode_to_scalar() - converts scancode in &struct input_keymap_entry
 * @ke: keymap entry containing scancode to be converted.
 * @scancode: pointer to the location where converted scancode should
 *	be stored.
 *
 * This function is used to convert scancode stored in &struct input_keymap_entry
 * into scalar form understood by legacy keymap handling methods. These
 * methods expect scancodes to be represented as 'unsigned int'.
 */
int input_scancode_to_scalar(const struct input_keymap_entry *ke,
			     unsigned int *scancode)
{
	switch (ke->len) {
	case 1:
		*scancode = *((u8 *)ke->scancode);
		break;

	case 2:
		*scancode = *((u16 *)ke->scancode);
		break;

	case 4:
		*scancode = *((u32 *)ke->scancode);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(input_scancode_to_scalar);
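
/*
 * Illustrative sketch (not part of the input core): building a keymap entry
 * around a 16-bit hardware scancode and converting it back to the scalar
 * form used by legacy keymap methods. The 0x1c scancode is an arbitrary
 * example value.
 */
static int __maybe_unused example_scancode_roundtrip(void)
{
	struct input_keymap_entry ke = { .len = sizeof(u16) };
	u16 hw_code = 0x1c;
	unsigned int scalar;

	memcpy(ke.scancode, &hw_code, sizeof(hw_code));

	/* on success scalar ends up as 0x1c */
	return input_scancode_to_scalar(&ke, &scalar);
}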
787 */ 788 789 static unsigned int input_fetch_keycode(struct input_dev *dev, 790 unsigned int index) 791 { 792 switch (dev->keycodesize) { 793 case 1: 794 return ((u8 *)dev->keycode)[index]; 795 796 case 2: 797 return ((u16 *)dev->keycode)[index]; 798 799 default: 800 return ((u32 *)dev->keycode)[index]; 801 } 802 } 803 804 static int input_default_getkeycode(struct input_dev *dev, 805 struct input_keymap_entry *ke) 806 { 807 unsigned int index; 808 int error; 809 810 if (!dev->keycodesize) 811 return -EINVAL; 812 813 if (ke->flags & INPUT_KEYMAP_BY_INDEX) 814 index = ke->index; 815 else { 816 error = input_scancode_to_scalar(ke, &index); 817 if (error) 818 return error; 819 } 820 821 if (index >= dev->keycodemax) 822 return -EINVAL; 823 824 ke->keycode = input_fetch_keycode(dev, index); 825 ke->index = index; 826 ke->len = sizeof(index); 827 memcpy(ke->scancode, &index, sizeof(index)); 828 829 return 0; 830 } 831 832 static int input_default_setkeycode(struct input_dev *dev, 833 const struct input_keymap_entry *ke, 834 unsigned int *old_keycode) 835 { 836 unsigned int index; 837 int error; 838 int i; 839 840 if (!dev->keycodesize) 841 return -EINVAL; 842 843 if (ke->flags & INPUT_KEYMAP_BY_INDEX) { 844 index = ke->index; 845 } else { 846 error = input_scancode_to_scalar(ke, &index); 847 if (error) 848 return error; 849 } 850 851 if (index >= dev->keycodemax) 852 return -EINVAL; 853 854 if (dev->keycodesize < sizeof(ke->keycode) && 855 (ke->keycode >> (dev->keycodesize * 8))) 856 return -EINVAL; 857 858 switch (dev->keycodesize) { 859 case 1: { 860 u8 *k = (u8 *)dev->keycode; 861 *old_keycode = k[index]; 862 k[index] = ke->keycode; 863 break; 864 } 865 case 2: { 866 u16 *k = (u16 *)dev->keycode; 867 *old_keycode = k[index]; 868 k[index] = ke->keycode; 869 break; 870 } 871 default: { 872 u32 *k = (u32 *)dev->keycode; 873 *old_keycode = k[index]; 874 k[index] = ke->keycode; 875 break; 876 } 877 } 878 879 if (*old_keycode <= KEY_MAX) { 880 __clear_bit(*old_keycode, dev->keybit); 881 for (i = 0; i < dev->keycodemax; i++) { 882 if (input_fetch_keycode(dev, i) == *old_keycode) { 883 __set_bit(*old_keycode, dev->keybit); 884 /* Setting the bit twice is useless, so break */ 885 break; 886 } 887 } 888 } 889 890 __set_bit(ke->keycode, dev->keybit); 891 return 0; 892 } 893 894 /** 895 * input_get_keycode - retrieve keycode currently mapped to a given scancode 896 * @dev: input device which keymap is being queried 897 * @ke: keymap entry 898 * 899 * This function should be called by anyone interested in retrieving current 900 * keymap. Presently evdev handlers use it. 901 */ 902 int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke) 903 { 904 unsigned long flags; 905 int retval; 906 907 spin_lock_irqsave(&dev->event_lock, flags); 908 retval = dev->getkeycode(dev, ke); 909 spin_unlock_irqrestore(&dev->event_lock, flags); 910 911 return retval; 912 } 913 EXPORT_SYMBOL(input_get_keycode); 914 915 /** 916 * input_set_keycode - attribute a keycode to a given scancode 917 * @dev: input device which keymap is being updated 918 * @ke: new keymap entry 919 * 920 * This function should be called by anyone needing to update current 921 * keymap. Presently keyboard and evdev handlers use it. 

/**
 * input_set_keycode - attribute a keycode to a given scancode
 * @dev: input device which keymap is being updated
 * @ke: new keymap entry
 *
 * This function should be called by anyone needing to update current
 * keymap. Presently keyboard and evdev handlers use it.
 */
int input_set_keycode(struct input_dev *dev,
		      const struct input_keymap_entry *ke)
{
	unsigned long flags;
	unsigned int old_keycode;
	int retval;

	if (ke->keycode > KEY_MAX)
		return -EINVAL;

	spin_lock_irqsave(&dev->event_lock, flags);

	retval = dev->setkeycode(dev, ke, &old_keycode);
	if (retval)
		goto out;

	/* Make sure KEY_RESERVED did not get enabled. */
	__clear_bit(KEY_RESERVED, dev->keybit);

	/*
	 * Simulate keyup event if keycode is not present
	 * in the keymap anymore
	 */
	if (old_keycode > KEY_MAX) {
		dev_warn(dev->dev.parent ?: &dev->dev,
			 "%s: got too big old keycode %#x\n",
			 __func__, old_keycode);
	} else if (test_bit(EV_KEY, dev->evbit) &&
		   !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
		   __test_and_clear_bit(old_keycode, dev->key)) {
		/*
		 * We have to use input_event_dispose() here directly instead
		 * of input_handle_event() because the key we want to release
		 * here is considered no longer supported by the device and
		 * input_handle_event() will ignore it.
		 */
		input_event_dispose(dev, INPUT_PASS_TO_HANDLERS,
				    EV_KEY, old_keycode, 0);
		input_event_dispose(dev, INPUT_PASS_TO_HANDLERS | INPUT_FLUSH,
				    EV_SYN, SYN_REPORT, 1);
	}

out:
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return retval;
}
EXPORT_SYMBOL(input_set_keycode);

bool input_match_device_id(const struct input_dev *dev,
			   const struct input_device_id *id)
{
	if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
		if (id->bustype != dev->id.bustype)
			return false;

	if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
		if (id->vendor != dev->id.vendor)
			return false;

	if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
		if (id->product != dev->id.product)
			return false;

	if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
		if (id->version != dev->id.version)
			return false;

	if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) ||
	    !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) ||
	    !bitmap_subset(id->relbit, dev->relbit, REL_MAX) ||
	    !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) ||
	    !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) ||
	    !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) ||
	    !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) ||
	    !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) ||
	    !bitmap_subset(id->swbit, dev->swbit, SW_MAX) ||
	    !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) {
		return false;
	}

	return true;
}
EXPORT_SYMBOL(input_match_device_id);

static const struct input_device_id *input_match_device(struct input_handler *handler,
							struct input_dev *dev)
{
	const struct input_device_id *id;

	for (id = handler->id_table; id->flags || id->driver_info; id++) {
		if (input_match_device_id(dev, id) &&
		    (!handler->match || handler->match(handler, dev))) {
			return id;
		}
	}

	return NULL;
}

static int input_attach_handler(struct input_dev *dev, struct input_handler *handler)
{
	const struct input_device_id *id;
	int error;

	id = input_match_device(handler, dev);
	if (!id)
		return -ENODEV;

	error = handler->connect(handler, dev, id);
	if (error && error != -ENODEV)
		pr_err("failed to attach handler %s to device %s, error: %d\n",
		       handler->name, kobject_name(&dev->dev.kobj), error);

	return error;
}
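
/*
 * Illustrative sketch (not part of the input core): the id_table a handler
 * registers so that input_match_device_id() above attaches it. This example
 * matches any device that advertises EV_KEY; the table name is hypothetical.
 */
static const struct input_device_id example_handler_ids[] __maybe_unused = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_KEY) },
	},
	{ },	/* terminating entry */
};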

#ifdef CONFIG_COMPAT

static int input_bits_to_string(char *buf, int buf_size,
				unsigned long bits, bool skip_empty)
{
	int len = 0;

	if (in_compat_syscall()) {
		u32 dword = bits >> 32;
		if (dword || !skip_empty)
			len += snprintf(buf, buf_size, "%x ", dword);

		dword = bits & 0xffffffffUL;
		if (dword || !skip_empty || len)
			len += snprintf(buf + len, max(buf_size - len, 0),
					"%x", dword);
	} else {
		if (bits || !skip_empty)
			len += snprintf(buf, buf_size, "%lx", bits);
	}

	return len;
}

#else /* !CONFIG_COMPAT */

static int input_bits_to_string(char *buf, int buf_size,
				unsigned long bits, bool skip_empty)
{
	return bits || !skip_empty ?
		snprintf(buf, buf_size, "%lx", bits) : 0;
}

#endif

#ifdef CONFIG_PROC_FS

static struct proc_dir_entry *proc_bus_input_dir;
static DECLARE_WAIT_QUEUE_HEAD(input_devices_poll_wait);
static int input_devices_state;

static inline void input_wakeup_procfs_readers(void)
{
	input_devices_state++;
	wake_up(&input_devices_poll_wait);
}

static __poll_t input_proc_devices_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &input_devices_poll_wait, wait);
	if (file->f_version != input_devices_state) {
		file->f_version = input_devices_state;
		return EPOLLIN | EPOLLRDNORM;
	}

	return 0;
}

union input_seq_state {
	struct {
		unsigned short pos;
		bool mutex_acquired;
	};
	void *p;
};

static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos)
{
	union input_seq_state *state = (union input_seq_state *)&seq->private;
	int error;

	/* We need to fit into seq->private pointer */
	BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));

	error = mutex_lock_interruptible(&input_mutex);
	if (error) {
		state->mutex_acquired = false;
		return ERR_PTR(error);
	}

	state->mutex_acquired = true;

	return seq_list_start(&input_dev_list, *pos);
}

static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &input_dev_list, pos);
}

static void input_seq_stop(struct seq_file *seq, void *v)
{
	union input_seq_state *state = (union input_seq_state *)&seq->private;

	if (state->mutex_acquired)
		mutex_unlock(&input_mutex);
}

static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
				   unsigned long *bitmap, int max)
{
	int i;
	bool skip_empty = true;
	char buf[18];

	seq_printf(seq, "B: %s=", name);

	for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
		if (input_bits_to_string(buf, sizeof(buf),
					 bitmap[i], skip_empty)) {
			skip_empty = false;
			seq_printf(seq, "%s%s", buf, i > 0 ? " " : "");
		}
	}

	/*
	 * If no output was produced print a single 0.
	 */
	if (skip_empty)
		seq_putc(seq, '0');

	seq_putc(seq, '\n');
}

static int input_devices_seq_show(struct seq_file *seq, void *v)
{
	struct input_dev *dev = container_of(v, struct input_dev, node);
	const char *path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
	struct input_handle *handle;

	seq_printf(seq, "I: Bus=%04x Vendor=%04x Product=%04x Version=%04x\n",
		   dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version);

	seq_printf(seq, "N: Name=\"%s\"\n", dev->name ? dev->name : "");
	seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : "");
	seq_printf(seq, "S: Sysfs=%s\n", path ? path : "");
	seq_printf(seq, "U: Uniq=%s\n", dev->uniq ? dev->uniq : "");
	seq_puts(seq, "H: Handlers=");

	list_for_each_entry(handle, &dev->h_list, d_node)
		seq_printf(seq, "%s ", handle->name);
	seq_putc(seq, '\n');

	input_seq_print_bitmap(seq, "PROP", dev->propbit, INPUT_PROP_MAX);

	input_seq_print_bitmap(seq, "EV", dev->evbit, EV_MAX);
	if (test_bit(EV_KEY, dev->evbit))
		input_seq_print_bitmap(seq, "KEY", dev->keybit, KEY_MAX);
	if (test_bit(EV_REL, dev->evbit))
		input_seq_print_bitmap(seq, "REL", dev->relbit, REL_MAX);
	if (test_bit(EV_ABS, dev->evbit))
		input_seq_print_bitmap(seq, "ABS", dev->absbit, ABS_MAX);
	if (test_bit(EV_MSC, dev->evbit))
		input_seq_print_bitmap(seq, "MSC", dev->mscbit, MSC_MAX);
	if (test_bit(EV_LED, dev->evbit))
		input_seq_print_bitmap(seq, "LED", dev->ledbit, LED_MAX);
	if (test_bit(EV_SND, dev->evbit))
		input_seq_print_bitmap(seq, "SND", dev->sndbit, SND_MAX);
	if (test_bit(EV_FF, dev->evbit))
		input_seq_print_bitmap(seq, "FF", dev->ffbit, FF_MAX);
	if (test_bit(EV_SW, dev->evbit))
		input_seq_print_bitmap(seq, "SW", dev->swbit, SW_MAX);

	seq_putc(seq, '\n');

	kfree(path);
	return 0;
}

static const struct seq_operations input_devices_seq_ops = {
	.start	= input_devices_seq_start,
	.next	= input_devices_seq_next,
	.stop	= input_seq_stop,
	.show	= input_devices_seq_show,
};

static int input_proc_devices_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &input_devices_seq_ops);
}

static const struct proc_ops input_devices_proc_ops = {
	.proc_open	= input_proc_devices_open,
	.proc_poll	= input_proc_devices_poll,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
};
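
/*
 * Illustrative note (derived from input_devices_seq_show() above): each
 * registered device appears in /proc/bus/input/devices as one record of
 * the form
 *
 *	I: Bus=xxxx Vendor=xxxx Product=xxxx Version=xxxx
 *	N: Name="..."
 *	P: Phys=...
 *	S: Sysfs=...
 *	U: Uniq=...
 *	H: Handlers=<names of attached handles>
 *	B: <one line per active bitmap, e.g. PROP=, EV=, KEY=, ...>
 *
 * followed by a blank line separating it from the next device.
 */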

static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
{
	union input_seq_state *state = (union input_seq_state *)&seq->private;
	int error;

	/* We need to fit into seq->private pointer */
	BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));

	error = mutex_lock_interruptible(&input_mutex);
	if (error) {
		state->mutex_acquired = false;
		return ERR_PTR(error);
	}

	state->mutex_acquired = true;
	state->pos = *pos;

	return seq_list_start(&input_handler_list, *pos);
}

static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	union input_seq_state *state = (union input_seq_state *)&seq->private;

	state->pos = *pos + 1;
	return seq_list_next(v, &input_handler_list, pos);
}

static int input_handlers_seq_show(struct seq_file *seq, void *v)
{
	struct input_handler *handler = container_of(v, struct input_handler, node);
	union input_seq_state *state = (union input_seq_state *)&seq->private;

	seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name);
	if (handler->filter)
		seq_puts(seq, " (filter)");
	if (handler->legacy_minors)
		seq_printf(seq, " Minor=%d", handler->minor);
	seq_putc(seq, '\n');

	return 0;
}

static const struct seq_operations input_handlers_seq_ops = {
	.start	= input_handlers_seq_start,
	.next	= input_handlers_seq_next,
	.stop	= input_seq_stop,
	.show	= input_handlers_seq_show,
};

static int input_proc_handlers_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &input_handlers_seq_ops);
}

static const struct proc_ops input_handlers_proc_ops = {
	.proc_open	= input_proc_handlers_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
};

static int __init input_proc_init(void)
{
	struct proc_dir_entry *entry;

	proc_bus_input_dir = proc_mkdir("bus/input", NULL);
	if (!proc_bus_input_dir)
		return -ENOMEM;

	entry = proc_create("devices", 0, proc_bus_input_dir,
			    &input_devices_proc_ops);
	if (!entry)
		goto fail1;

	entry = proc_create("handlers", 0, proc_bus_input_dir,
			    &input_handlers_proc_ops);
	if (!entry)
		goto fail2;

	return 0;

 fail2:	remove_proc_entry("devices", proc_bus_input_dir);
 fail1:	remove_proc_entry("bus/input", NULL);
	return -ENOMEM;
}

static void input_proc_exit(void)
{
	remove_proc_entry("devices", proc_bus_input_dir);
	remove_proc_entry("handlers", proc_bus_input_dir);
	remove_proc_entry("bus/input", NULL);
}

#else /* !CONFIG_PROC_FS */
static inline void input_wakeup_procfs_readers(void) { }
static inline int input_proc_init(void) { return 0; }
static inline void input_proc_exit(void) { }
#endif

#define INPUT_DEV_STRING_ATTR_SHOW(name)				\
static ssize_t input_dev_show_##name(struct device *dev,		\
				     struct device_attribute *attr,	\
				     char *buf)				\
{									\
	struct input_dev *input_dev = to_input_dev(dev);		\
									\
	return sysfs_emit(buf, "%s\n",					\
			  input_dev->name ? input_dev->name : "");	\
}									\
static DEVICE_ATTR(name, S_IRUGO, input_dev_show_##name, NULL)

INPUT_DEV_STRING_ATTR_SHOW(name);
INPUT_DEV_STRING_ATTR_SHOW(phys);
INPUT_DEV_STRING_ATTR_SHOW(uniq);

static int input_print_modalias_bits(char *buf, int size,
				     char name, const unsigned long *bm,
				     unsigned int min_bit, unsigned int max_bit)
{
	int bit = min_bit;
	int len = 0;

	len += snprintf(buf, max(size, 0), "%c", name);
	for_each_set_bit_from(bit, bm, max_bit)
		len += snprintf(buf + len, max(size - len, 0), "%X,", bit);
	return len;
}

static int input_print_modalias_parts(char *buf, int size, int full_len,
				      const struct input_dev *id)
{
	int len, klen, remainder, space;

	len = snprintf(buf, max(size, 0),
		       "input:b%04Xv%04Xp%04Xe%04X-",
		       id->id.bustype, id->id.vendor,
		       id->id.product, id->id.version);

	len += input_print_modalias_bits(buf + len, size - len,
					 'e', id->evbit, 0, EV_MAX);

	/*
	 * Calculate the remaining space in the buffer making sure we
	 * have place for the terminating 0.
	 */
	space = max(size - (len + 1), 0);

	klen = input_print_modalias_bits(buf + len, size - len,
					 'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX);
	len += klen;

	/*
	 * If we have more data than we can fit in the buffer, check
	 * if we can trim key data to fit in the rest. We will indicate
	 * that key data is incomplete by adding "+" sign at the end, like
	 * this: "k1,2,3,45,+,".
	 *
	 * Note that the shortest key info (if present) is "k+," so we
	 * can only try to trim if key data is longer than that.
	 */
	if (full_len && size < full_len + 1 && klen > 3) {
		remainder = full_len - len;
		/*
		 * We can only trim if we have space for the remainder
		 * and also for at least "k+," which is 3 more characters.
		 */
		if (remainder <= space - 3) {
			/*
			 * We are guaranteed to have 'k' in the buffer, so
			 * we need at least 3 additional bytes for storing
			 * "+," in addition to the remainder.
			 */
			for (int i = size - 1 - remainder - 3; i >= 0; i--) {
				if (buf[i] == 'k' || buf[i] == ',') {
					strcpy(buf + i + 1, "+,");
					len = i + 3; /* Not counting '\0' */
					break;
				}
			}
		}
	}

	len += input_print_modalias_bits(buf + len, size - len,
					 'r', id->relbit, 0, REL_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
					 'a', id->absbit, 0, ABS_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
					 'm', id->mscbit, 0, MSC_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
					 'l', id->ledbit, 0, LED_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
					 's', id->sndbit, 0, SND_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
					 'f', id->ffbit, 0, FF_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
					 'w', id->swbit, 0, SW_MAX);

	return len;
}

static int input_print_modalias(char *buf, int size, const struct input_dev *id)
{
	int full_len;

	/*
	 * Printing is done in 2 passes: first one figures out total length
	 * needed for the modalias string, second one will try to trim key
	 * data in case when buffer is too small for the entire modalias.
	 * If the buffer is too small regardless, it will fill as much as it
	 * can (without trimming key data) into the buffer and leave it to
	 * the caller to figure out what to do with the result.
	 */
	full_len = input_print_modalias_parts(NULL, 0, 0, id);
	return input_print_modalias_parts(buf, size, full_len, id);
}

static ssize_t input_dev_show_modalias(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct input_dev *id = to_input_dev(dev);
	ssize_t len;

	len = input_print_modalias(buf, PAGE_SIZE, id);
	if (len < PAGE_SIZE - 2)
		len += snprintf(buf + len, PAGE_SIZE - len, "\n");

	return min_t(int, len, PAGE_SIZE);
}
static DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL);

static int input_print_bitmap(char *buf, int buf_size, const unsigned long *bitmap,
			      int max, int add_cr);

static ssize_t input_dev_show_properties(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct input_dev *input_dev = to_input_dev(dev);
	int len = input_print_bitmap(buf, PAGE_SIZE, input_dev->propbit,
				     INPUT_PROP_MAX, true);
	return min_t(int, len, PAGE_SIZE);
}
static DEVICE_ATTR(properties, S_IRUGO, input_dev_show_properties, NULL);

static int input_inhibit_device(struct input_dev *dev);
static int input_uninhibit_device(struct input_dev *dev);

static ssize_t inhibited_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct input_dev *input_dev = to_input_dev(dev);

	return sysfs_emit(buf, "%d\n", input_dev->inhibited);
}

static ssize_t inhibited_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t len)
{
	struct input_dev *input_dev = to_input_dev(dev);
	ssize_t rv;
	bool inhibited;

	if (kstrtobool(buf, &inhibited))
		return -EINVAL;

	if (inhibited)
		rv = input_inhibit_device(input_dev);
	else
		rv = input_uninhibit_device(input_dev);

	if (rv != 0)
		return rv;

	return len;
}

static DEVICE_ATTR_RW(inhibited);

static struct attribute *input_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_phys.attr,
	&dev_attr_uniq.attr,
	&dev_attr_modalias.attr,
	&dev_attr_properties.attr,
	&dev_attr_inhibited.attr,
	NULL
};

static const struct attribute_group input_dev_attr_group = {
	.attrs	= input_dev_attrs,
};

#define INPUT_DEV_ID_ATTR(name)						\
static ssize_t input_dev_show_id_##name(struct device *dev,		\
					struct device_attribute *attr,	\
					char *buf)			\
{									\
	struct input_dev *input_dev = to_input_dev(dev);		\
	return sysfs_emit(buf, "%04x\n", input_dev->id.name);		\
}									\
static DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL)

INPUT_DEV_ID_ATTR(bustype);
INPUT_DEV_ID_ATTR(vendor);
INPUT_DEV_ID_ATTR(product);
INPUT_DEV_ID_ATTR(version);

static struct attribute *input_dev_id_attrs[] = {
	&dev_attr_bustype.attr,
	&dev_attr_vendor.attr,
	&dev_attr_product.attr,
	&dev_attr_version.attr,
	NULL
};

static const struct attribute_group input_dev_id_attr_group = {
	.name	= "id",
	.attrs	= input_dev_id_attrs,
};

static int input_print_bitmap(char *buf, int buf_size, const unsigned long *bitmap,
			      int max, int add_cr)
{
	int i;
	int len = 0;
	bool skip_empty = true;

	for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
		len += input_bits_to_string(buf + len, max(buf_size - len, 0),
					    bitmap[i], skip_empty);
		if (len) {
			skip_empty = false;
			if (i > 0)
				len += snprintf(buf + len, max(buf_size - len, 0), " ");
		}
	}

	/*
	 * If no output was produced print a single 0.
	 */
	if (len == 0)
		len = snprintf(buf, buf_size, "%d", 0);

	if (add_cr)
		len += snprintf(buf + len, max(buf_size - len, 0), "\n");

	return len;
}

#define INPUT_DEV_CAP_ATTR(ev, bm)					\
static ssize_t input_dev_show_cap_##bm(struct device *dev,		\
				       struct device_attribute *attr,	\
				       char *buf)			\
{									\
	struct input_dev *input_dev = to_input_dev(dev);		\
	int len = input_print_bitmap(buf, PAGE_SIZE,			\
				     input_dev->bm##bit, ev##_MAX,	\
				     true);				\
	return min_t(int, len, PAGE_SIZE);				\
}									\
static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL)

INPUT_DEV_CAP_ATTR(EV, ev);
INPUT_DEV_CAP_ATTR(KEY, key);
INPUT_DEV_CAP_ATTR(REL, rel);
INPUT_DEV_CAP_ATTR(ABS, abs);
INPUT_DEV_CAP_ATTR(MSC, msc);
INPUT_DEV_CAP_ATTR(LED, led);
INPUT_DEV_CAP_ATTR(SND, snd);
INPUT_DEV_CAP_ATTR(FF, ff);
INPUT_DEV_CAP_ATTR(SW, sw);

static struct attribute *input_dev_caps_attrs[] = {
	&dev_attr_ev.attr,
	&dev_attr_key.attr,
	&dev_attr_rel.attr,
	&dev_attr_abs.attr,
	&dev_attr_msc.attr,
	&dev_attr_led.attr,
	&dev_attr_snd.attr,
	&dev_attr_ff.attr,
	&dev_attr_sw.attr,
	NULL
};

static const struct attribute_group input_dev_caps_attr_group = {
	.name	= "capabilities",
	.attrs	= input_dev_caps_attrs,
};

static const struct attribute_group *input_dev_attr_groups[] = {
	&input_dev_attr_group,
	&input_dev_id_attr_group,
	&input_dev_caps_attr_group,
	&input_poller_attribute_group,
	NULL
};

static void input_dev_release(struct device *device)
{
	struct input_dev *dev = to_input_dev(device);

	input_ff_destroy(dev);
	input_mt_destroy_slots(dev);
	kfree(dev->poller);
	kfree(dev->absinfo);
	kfree(dev->vals);
	kfree(dev);

	module_put(THIS_MODULE);
}

/*
 * Input uevent interface - loading event handlers based on
 * device bitfields.
 */
static int input_add_uevent_bm_var(struct kobj_uevent_env *env,
				   const char *name, const unsigned long *bitmap, int max)
{
	int len;

	if (add_uevent_var(env, "%s", name))
		return -ENOMEM;

	len = input_print_bitmap(&env->buf[env->buflen - 1],
				 sizeof(env->buf) - env->buflen,
				 bitmap, max, false);
	if (len >= (sizeof(env->buf) - env->buflen))
		return -ENOMEM;

	env->buflen += len;
	return 0;
}

/*
 * This is a pretty gross hack. When building uevent data the driver core
 * may try adding more environment variables to kobj_uevent_env without
 * telling us, so we have no idea how much of the buffer we can use to
 * avoid overflows/-ENOMEM elsewhere. To work around this let's artificially
 * reduce the amount of memory we will use for the modalias environment
 * variable.
 *
 * The potential additions are:
 *
 * SEQNUM=18446744073709551615 - (%llu - 28 bytes)
 * HOME=/ (6 bytes)
 * PATH=/sbin:/bin:/usr/sbin:/usr/bin (34 bytes)
 *
 * 68 bytes total. Allow extra buffer - 96 bytes.
 */
#define UEVENT_ENV_EXTRA_LEN		96

static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
					 const struct input_dev *dev)
{
	int len;

	if (add_uevent_var(env, "MODALIAS="))
		return -ENOMEM;

	len = input_print_modalias(&env->buf[env->buflen - 1],
				   (int)sizeof(env->buf) - env->buflen -
					UEVENT_ENV_EXTRA_LEN,
				   dev);
	if (len >= ((int)sizeof(env->buf) - env->buflen -
					UEVENT_ENV_EXTRA_LEN))
		return -ENOMEM;

	env->buflen += len;
	return 0;
}

#define INPUT_ADD_HOTPLUG_VAR(fmt, val...)				\
	do {								\
		int err = add_uevent_var(env, fmt, val);		\
		if (err)						\
			return err;					\
	} while (0)

#define INPUT_ADD_HOTPLUG_BM_VAR(name, bm, max)				\
	do {								\
		int err = input_add_uevent_bm_var(env, name, bm, max);	\
		if (err)						\
			return err;					\
	} while (0)

#define INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev)				\
	do {								\
		int err = input_add_uevent_modalias_var(env, dev);	\
		if (err)						\
			return err;					\
	} while (0)

static int input_dev_uevent(const struct device *device, struct kobj_uevent_env *env)
{
	const struct input_dev *dev = to_input_dev(device);

	INPUT_ADD_HOTPLUG_VAR("PRODUCT=%x/%x/%x/%x",
			      dev->id.bustype, dev->id.vendor,
			      dev->id.product, dev->id.version);
	if (dev->name)
		INPUT_ADD_HOTPLUG_VAR("NAME=\"%s\"", dev->name);
	if (dev->phys)
		INPUT_ADD_HOTPLUG_VAR("PHYS=\"%s\"", dev->phys);
	if (dev->uniq)
		INPUT_ADD_HOTPLUG_VAR("UNIQ=\"%s\"", dev->uniq);

	INPUT_ADD_HOTPLUG_BM_VAR("PROP=", dev->propbit, INPUT_PROP_MAX);

	INPUT_ADD_HOTPLUG_BM_VAR("EV=", dev->evbit, EV_MAX);
	if (test_bit(EV_KEY, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("KEY=", dev->keybit, KEY_MAX);
	if (test_bit(EV_REL, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("REL=", dev->relbit, REL_MAX);
	if (test_bit(EV_ABS, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("ABS=", dev->absbit, ABS_MAX);
	if (test_bit(EV_MSC, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("MSC=", dev->mscbit, MSC_MAX);
	if (test_bit(EV_LED, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("LED=", dev->ledbit, LED_MAX);
	if (test_bit(EV_SND, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("SND=", dev->sndbit, SND_MAX);
	if (test_bit(EV_FF, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("FF=", dev->ffbit, FF_MAX);
	if (test_bit(EV_SW, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("SW=", dev->swbit, SW_MAX);

	INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev);

	return 0;
}

#define INPUT_DO_TOGGLE(dev, type, bits, on)				\
	do {								\
		int i;							\
		bool active;						\
									\
		if (!test_bit(EV_##type, dev->evbit))			\
			break;						\
									\
		for_each_set_bit(i, dev->bits##bit, type##_CNT) {	\
			active = test_bit(i, dev->bits);		\
			if (!active && !on)				\
				continue;				\
									\
			dev->event(dev, EV_##type, i, on ? active : 0);	\
		}							\
	} while (0)

static void input_dev_toggle(struct input_dev *dev, bool activate)
{
	if (!dev->event)
		return;

	INPUT_DO_TOGGLE(dev, LED, led, activate);
	INPUT_DO_TOGGLE(dev, SND, snd, activate);

	if (activate && test_bit(EV_REP, dev->evbit)) {
		dev->event(dev, EV_REP, REP_PERIOD, dev->rep[REP_PERIOD]);
		dev->event(dev, EV_REP, REP_DELAY, dev->rep[REP_DELAY]);
	}
}

/**
 * input_reset_device() - reset/restore the state of input device
 * @dev: input device whose state needs to be reset
 *
 * This function tries to reset the state of an opened input device and
 * bring internal state and state of the hardware in sync with each other.
 * We mark all keys as released, restore LED state, repeat rate, etc.
 */
void input_reset_device(struct input_dev *dev)
{
	unsigned long flags;

	mutex_lock(&dev->mutex);
	spin_lock_irqsave(&dev->event_lock, flags);

	input_dev_toggle(dev, true);
	if (input_dev_release_keys(dev))
		input_handle_event(dev, EV_SYN, SYN_REPORT, 1);

	spin_unlock_irqrestore(&dev->event_lock, flags);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_reset_device);

static int input_inhibit_device(struct input_dev *dev)
{
	mutex_lock(&dev->mutex);

	if (dev->inhibited)
		goto out;

	if (dev->users) {
		if (dev->close)
			dev->close(dev);
		if (dev->poller)
			input_dev_poller_stop(dev->poller);
	}

	spin_lock_irq(&dev->event_lock);
	input_mt_release_slots(dev);
	input_dev_release_keys(dev);
	input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
	input_dev_toggle(dev, false);
	spin_unlock_irq(&dev->event_lock);

	dev->inhibited = true;

out:
	mutex_unlock(&dev->mutex);
	return 0;
}

static int input_uninhibit_device(struct input_dev *dev)
{
	int ret = 0;

	mutex_lock(&dev->mutex);

	if (!dev->inhibited)
		goto out;

	if (dev->users) {
		if (dev->open) {
			ret = dev->open(dev);
			if (ret)
				goto out;
		}
		if (dev->poller)
			input_dev_poller_start(dev->poller);
	}

	dev->inhibited = false;
	spin_lock_irq(&dev->event_lock);
	input_dev_toggle(dev, true);
	spin_unlock_irq(&dev->event_lock);

out:
	mutex_unlock(&dev->mutex);
	return ret;
}

static int input_dev_suspend(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);

	spin_lock_irq(&input_dev->event_lock);

	/*
	 * Keys that are pressed now are unlikely to be
	 * still pressed when we resume.
	 */
	if (input_dev_release_keys(input_dev))
		input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);

	/* Turn off LEDs and sounds, if any are active. */
	input_dev_toggle(input_dev, false);

	spin_unlock_irq(&input_dev->event_lock);

	return 0;
}

static int input_dev_resume(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);

	spin_lock_irq(&input_dev->event_lock);

	/* Restore state of LEDs and sounds, if any were active. */
	input_dev_toggle(input_dev, true);

	spin_unlock_irq(&input_dev->event_lock);

	return 0;
}

static int input_dev_freeze(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);

	spin_lock_irq(&input_dev->event_lock);

	/*
	 * Keys that are pressed now are unlikely to be
	 * still pressed when we resume.
	 */
	if (input_dev_release_keys(input_dev))
		input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);

	spin_unlock_irq(&input_dev->event_lock);

	return 0;
}

static int input_dev_poweroff(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);

	spin_lock_irq(&input_dev->event_lock);

	/* Turn off LEDs and sounds, if any are active. */
	input_dev_toggle(input_dev, false);

	spin_unlock_irq(&input_dev->event_lock);

	return 0;
}

static const struct dev_pm_ops input_dev_pm_ops = {
	.suspend	= input_dev_suspend,
	.resume		= input_dev_resume,
	.freeze		= input_dev_freeze,
	.poweroff	= input_dev_poweroff,
	.restore	= input_dev_resume,
};

static const struct device_type input_dev_type = {
	.groups		= input_dev_attr_groups,
	.release	= input_dev_release,
	.uevent		= input_dev_uevent,
	.pm		= pm_sleep_ptr(&input_dev_pm_ops),
};

static char *input_devnode(const struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev));
}

const struct class input_class = {
	.name		= "input",
	.devnode	= input_devnode,
};
EXPORT_SYMBOL_GPL(input_class);

/**
 * input_allocate_device - allocate memory for new input device
 *
 * Returns prepared struct input_dev or %NULL.
 *
 * NOTE: Use input_free_device() to free devices that have not been
 * registered; input_unregister_device() should be used for already
 * registered devices.
 */
struct input_dev *input_allocate_device(void)
{
	static atomic_t input_no = ATOMIC_INIT(-1);
	struct input_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	/*
	 * Start with space for SYN_REPORT + 7 EV_KEY/EV_MSC events + 2 spare,
	 * see input_estimate_events_per_packet(). We will tune the number
	 * when we register the device.
	 */
	dev->max_vals = 10;
	dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL);
	if (!dev->vals) {
		kfree(dev);
		return NULL;
	}

	mutex_init(&dev->mutex);
	spin_lock_init(&dev->event_lock);
	timer_setup(&dev->timer, NULL, 0);
	INIT_LIST_HEAD(&dev->h_list);
	INIT_LIST_HEAD(&dev->node);

	dev->dev.type = &input_dev_type;
	dev->dev.class = &input_class;
	device_initialize(&dev->dev);
	/*
	 * From this point on we can no longer simply "kfree(dev)", we need
	 * to use input_free_device() so that device core properly frees its
	 * resources associated with the input device.
	 */

	dev_set_name(&dev->dev, "input%lu",
		     (unsigned long)atomic_inc_return(&input_no));

	__module_get(THIS_MODULE);

	return dev;
}
EXPORT_SYMBOL(input_allocate_device);
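
/*
 * Illustrative sketch (not part of the input core): the usual allocate ->
 * describe -> register sequence a driver goes through with the APIs above.
 * input_register_device() is defined further down in this file; the device
 * name and capability below are arbitrary example choices.
 */
static struct input_dev * __maybe_unused example_create_button_device(void)
{
	struct input_dev *dev;
	int error;

	dev = input_allocate_device();
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->name = "Example Button";		/* hypothetical device name */
	dev->id.bustype = BUS_HOST;
	input_set_capability(dev, EV_KEY, KEY_POWER);

	error = input_register_device(dev);
	if (error) {
		input_free_device(dev);		/* still unregistered: free it */
		return ERR_PTR(error);
	}

	return dev;
}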
struct input_devres {
	struct input_dev *input;
};

static int devm_input_device_match(struct device *dev, void *res, void *data)
{
	struct input_devres *devres = res;

	return devres->input == data;
}

static void devm_input_device_release(struct device *dev, void *res)
{
	struct input_devres *devres = res;
	struct input_dev *input = devres->input;

	dev_dbg(dev, "%s: dropping reference to %s\n",
		__func__, dev_name(&input->dev));
	input_put_device(input);
}

/**
 * devm_input_allocate_device - allocate managed input device
 * @dev: device owning the input device being created
 *
 * Returns prepared struct input_dev or %NULL.
 *
 * Managed input devices do not need to be explicitly unregistered or
 * freed, as this is done automatically when the owner device unbinds
 * from its driver (or binding fails). Once a managed input device is
 * allocated, it is ready to be set up and registered in the same fashion
 * as a regular input device. There are no special
 * devm_input_device_[un]register() variants; the regular ones work with
 * both managed and unmanaged devices, should you need them. In most
 * cases, however, a managed input device need not be explicitly
 * unregistered or freed.
 *
 * NOTE: the owner device is set up as the parent of the input device
 * and users should not override it.
 */
struct input_dev *devm_input_allocate_device(struct device *dev)
{
	struct input_dev *input;
	struct input_devres *devres;

	devres = devres_alloc(devm_input_device_release,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return NULL;

	input = input_allocate_device();
	if (!input) {
		devres_free(devres);
		return NULL;
	}

	input->dev.parent = dev;
	input->devres_managed = true;

	devres->input = input;
	devres_add(dev, devres);

	return input;
}
EXPORT_SYMBOL(devm_input_allocate_device);

/**
 * input_free_device - free memory occupied by input_dev structure
 * @dev: input device to free
 *
 * This function should only be used if input_register_device()
 * was not called yet or if it failed. Once the device has been
 * registered, use input_unregister_device(); memory will be freed
 * once the last reference to the device is dropped.
 *
 * The device should have been allocated by input_allocate_device().
 *
 * NOTE: If there are references to the input device then memory
 * will not be freed until the last reference is dropped.
 */
void input_free_device(struct input_dev *dev)
{
	if (dev) {
		if (dev->devres_managed)
			WARN_ON(devres_destroy(dev->dev.parent,
						devm_input_device_release,
						devm_input_device_match,
						dev));
		input_put_device(dev);
	}
}
EXPORT_SYMBOL(input_free_device);

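/*
 * Illustrative sketch (not part of the core): managed allocation in a
 * driver's probe(). The device is tied to pdev->dev via devres, so no
 * explicit input_unregister_device()/input_free_device() is needed in
 * the remove path. foo_probe() is a hypothetical name.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct input_dev *input;
 *
 *		input = devm_input_allocate_device(&pdev->dev);
 *		if (!input)
 *			return -ENOMEM;
 *
 *		input->name = "Example Key";
 *		input_set_capability(input, EV_KEY, KEY_POWER);
 *
 *		return input_register_device(input);
 *	}
 */
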
/**
 * input_set_timestamp - set timestamp for input events
 * @dev: input device to set timestamp for
 * @timestamp: the time at which the event has occurred
 *	in CLOCK_MONOTONIC
 *
 * This function is intended to provide the input system with a more
 * accurate time at which an event actually occurred. The driver should
 * call this function as soon as a timestamp is acquired, ensuring that
 * clock conversions in input_set_timestamp() are done correctly.
 *
 * If the system enters a suspend state between timestamp acquisition and
 * the call to input_set_timestamp(), the conversions may be inaccurate.
 */
void input_set_timestamp(struct input_dev *dev, ktime_t timestamp)
{
	dev->timestamp[INPUT_CLK_MONO] = timestamp;
	dev->timestamp[INPUT_CLK_REAL] = ktime_mono_to_real(timestamp);
	dev->timestamp[INPUT_CLK_BOOT] = ktime_mono_to_any(timestamp,
							   TK_OFFS_BOOT);
}
EXPORT_SYMBOL(input_set_timestamp);

/**
 * input_get_timestamp - get timestamp for input events
 * @dev: input device to get timestamp from
 *
 * A valid timestamp is a timestamp of non-zero value.
 */
ktime_t *input_get_timestamp(struct input_dev *dev)
{
	const ktime_t invalid_timestamp = ktime_set(0, 0);

	if (!ktime_compare(dev->timestamp[INPUT_CLK_MONO], invalid_timestamp))
		input_set_timestamp(dev, ktime_get());

	return dev->timestamp;
}
EXPORT_SYMBOL(input_get_timestamp);

/**
 * input_set_capability - mark device as capable of a certain event
 * @dev: device that is capable of emitting or accepting event
 * @type: type of the event (EV_KEY, EV_REL, etc...)
 * @code: event code
 *
 * In addition to setting the corresponding bit in the appropriate
 * capability bitmap, the function also adjusts dev->evbit.
 */
void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
{
	if (type < EV_CNT && input_max_code[type] &&
	    code > input_max_code[type]) {
		pr_err("%s: invalid code %u for type %u\n", __func__, code,
		       type);
		dump_stack();
		return;
	}

	switch (type) {
	case EV_KEY:
		__set_bit(code, dev->keybit);
		break;

	case EV_REL:
		__set_bit(code, dev->relbit);
		break;

	case EV_ABS:
		input_alloc_absinfo(dev);
		__set_bit(code, dev->absbit);
		break;

	case EV_MSC:
		__set_bit(code, dev->mscbit);
		break;

	case EV_SW:
		__set_bit(code, dev->swbit);
		break;

	case EV_LED:
		__set_bit(code, dev->ledbit);
		break;

	case EV_SND:
		__set_bit(code, dev->sndbit);
		break;

	case EV_FF:
		__set_bit(code, dev->ffbit);
		break;

	case EV_PWR:
		/* do nothing */
		break;

	default:
		pr_err("%s: unknown type %u (code %u)\n", __func__, type, code);
		dump_stack();
		return;
	}

	__set_bit(type, dev->evbit);
}
EXPORT_SYMBOL(input_set_capability);

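/*
 * Illustrative sketch (not part of the core): a driver declares the events
 * it can generate with input_set_capability() at setup time and, in its
 * interrupt handler, stamps the event as early as possible with
 * input_set_timestamp() before reporting it. foo_irq(), struct foo_priv
 * and foo_read_state() are hypothetical.
 *
 *	// during setup:
 *	input_set_capability(foo->input, EV_KEY, KEY_POWER);
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo_priv *foo = data;
 *
 *		input_set_timestamp(foo->input, ktime_get());
 *		input_report_key(foo->input, KEY_POWER, foo_read_state(foo));
 *		input_sync(foo->input);
 *		return IRQ_HANDLED;
 *	}
 */
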
static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
{
	int mt_slots;
	int i;
	unsigned int events;

	if (dev->mt) {
		mt_slots = dev->mt->num_slots;
	} else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
		mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
			   dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
		mt_slots = clamp(mt_slots, 2, 32);
	} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
		mt_slots = 2;
	} else {
		mt_slots = 0;
	}

	events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */

	if (test_bit(EV_ABS, dev->evbit))
		for_each_set_bit(i, dev->absbit, ABS_CNT)
			events += input_is_mt_axis(i) ? mt_slots : 1;

	if (test_bit(EV_REL, dev->evbit))
		events += bitmap_weight(dev->relbit, REL_CNT);

	/* Make room for KEY and MSC events */
	events += 7;

	return events;
}

#define INPUT_CLEANSE_BITMASK(dev, type, bits)				\
	do {								\
		if (!test_bit(EV_##type, dev->evbit))			\
			memset(dev->bits##bit, 0,			\
				sizeof(dev->bits##bit));		\
	} while (0)

static void input_cleanse_bitmasks(struct input_dev *dev)
{
	INPUT_CLEANSE_BITMASK(dev, KEY, key);
	INPUT_CLEANSE_BITMASK(dev, REL, rel);
	INPUT_CLEANSE_BITMASK(dev, ABS, abs);
	INPUT_CLEANSE_BITMASK(dev, MSC, msc);
	INPUT_CLEANSE_BITMASK(dev, LED, led);
	INPUT_CLEANSE_BITMASK(dev, SND, snd);
	INPUT_CLEANSE_BITMASK(dev, FF, ff);
	INPUT_CLEANSE_BITMASK(dev, SW, sw);
}

static void __input_unregister_device(struct input_dev *dev)
{
	struct input_handle *handle, *next;

	input_disconnect_device(dev);

	mutex_lock(&input_mutex);

	list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
		handle->handler->disconnect(handle);
	WARN_ON(!list_empty(&dev->h_list));

	del_timer_sync(&dev->timer);
	list_del_init(&dev->node);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);

	device_del(&dev->dev);
}

static void devm_input_device_unregister(struct device *dev, void *res)
{
	struct input_devres *devres = res;
	struct input_dev *input = devres->input;

	dev_dbg(dev, "%s: unregistering device %s\n",
		__func__, dev_name(&input->dev));
	__input_unregister_device(input);
}

/*
 * Generate software autorepeat event. Note that we take
 * dev->event_lock here to avoid racing with input_event
 * which may cause keys to get "stuck".
 */
static void input_repeat_key(struct timer_list *t)
{
	struct input_dev *dev = from_timer(dev, t, timer);
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	if (!dev->inhibited &&
	    test_bit(dev->repeat_key, dev->key) &&
	    is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {

		input_set_timestamp(dev, ktime_get());
		input_handle_event(dev, EV_KEY, dev->repeat_key, 2);
		input_handle_event(dev, EV_SYN, SYN_REPORT, 1);

		if (dev->rep[REP_PERIOD])
			mod_timer(&dev->timer, jiffies +
					msecs_to_jiffies(dev->rep[REP_PERIOD]));
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

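/*
 * Note on how software autorepeat looks to handlers (illustrative): a held
 * key produces EV_KEY with value 1 (press), then a stream of value 2
 * (autorepeat) events generated by the timer above, and finally value 0
 * (release). A handler that does not care about repeats can simply skip
 * value 2 in its event() method:
 *
 *	if (type == EV_KEY && value == 2)
 *		return;		// ignore autorepeat
 */
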
/**
 * input_enable_softrepeat - enable software autorepeat
 * @dev: input device
 * @delay: repeat delay
 * @period: repeat period
 *
 * Enable software autorepeat on the input device.
 */
void input_enable_softrepeat(struct input_dev *dev, int delay, int period)
{
	dev->timer.function = input_repeat_key;
	dev->rep[REP_DELAY] = delay;
	dev->rep[REP_PERIOD] = period;
}
EXPORT_SYMBOL(input_enable_softrepeat);

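/*
 * Illustrative sketch (not part of the core): a driver that wants software
 * autorepeat with a non-default rate can set EV_REP and call
 * input_enable_softrepeat() before registering; leaving dev->rep[] at zero
 * instead makes input_register_device() fall back to the 250 ms / 33 ms
 * defaults below. The 500/50 values here are arbitrary.
 *
 *	__set_bit(EV_REP, input->evbit);
 *	input_enable_softrepeat(input, 500, 50);
 */
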
bool input_device_enabled(struct input_dev *dev)
{
	lockdep_assert_held(&dev->mutex);

	return !dev->inhibited && dev->users > 0;
}
EXPORT_SYMBOL_GPL(input_device_enabled);

static int input_device_tune_vals(struct input_dev *dev)
{
	struct input_value *vals;
	unsigned int packet_size;
	unsigned int max_vals;

	packet_size = input_estimate_events_per_packet(dev);
	if (dev->hint_events_per_packet < packet_size)
		dev->hint_events_per_packet = packet_size;

	max_vals = dev->hint_events_per_packet + 2;
	if (dev->max_vals >= max_vals)
		return 0;

	vals = kcalloc(max_vals, sizeof(*vals), GFP_KERNEL);
	if (!vals)
		return -ENOMEM;

	spin_lock_irq(&dev->event_lock);
	dev->max_vals = max_vals;
	swap(dev->vals, vals);
	spin_unlock_irq(&dev->event_lock);

	/* Because of swap() above, this frees the old vals memory */
	kfree(vals);

	return 0;
}

/**
 * input_register_device - register device with input core
 * @dev: device to be registered
 *
 * This function registers device with input core. The device must be
 * allocated with input_allocate_device() and all its capabilities
 * set up before registering.
 * If this function fails, the device must be freed with
 * input_free_device(). Once the device has been successfully registered
 * it can be unregistered with input_unregister_device();
 * input_free_device() should not be called in this case.
 *
 * Note that this function is also used to register managed input devices
 * (ones allocated with devm_input_allocate_device()). Such managed input
 * devices need not be explicitly unregistered or freed, their tear down
 * is controlled by the devres infrastructure. It is also worth noting
 * that tear down of managed input devices is internally a 2-step process:
 * the registered managed input device is first unregistered, but stays in
 * memory and can still handle input_event() calls (although events will
 * not be delivered anywhere). The freeing of the managed input device will
 * happen later, when the devres stack is unwound to the point where the
 * device allocation was made.
 */
int input_register_device(struct input_dev *dev)
{
	struct input_devres *devres = NULL;
	struct input_handler *handler;
	const char *path;
	int error;

	if (test_bit(EV_ABS, dev->evbit) && !dev->absinfo) {
		dev_err(&dev->dev,
			"Absolute device without dev->absinfo, refusing to register\n");
		return -EINVAL;
	}

	if (dev->devres_managed) {
		devres = devres_alloc(devm_input_device_unregister,
				      sizeof(*devres), GFP_KERNEL);
		if (!devres)
			return -ENOMEM;

		devres->input = dev;
	}

	/* Every input device generates EV_SYN/SYN_REPORT events. */
	__set_bit(EV_SYN, dev->evbit);

	/* KEY_RESERVED is not supposed to be transmitted to userspace. */
	__clear_bit(KEY_RESERVED, dev->keybit);

	/* Make sure that bitmasks not mentioned in dev->evbit are clean. */
	input_cleanse_bitmasks(dev);

	error = input_device_tune_vals(dev);
	if (error)
		goto err_devres_free;

	/*
	 * If delay and period are pre-set by the driver, then autorepeating
	 * is handled by the driver itself and we don't do it in input.c.
	 */
	if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD])
		input_enable_softrepeat(dev, 250, 33);

	if (!dev->getkeycode)
		dev->getkeycode = input_default_getkeycode;

	if (!dev->setkeycode)
		dev->setkeycode = input_default_setkeycode;

	if (dev->poller)
		input_dev_poller_finalize(dev->poller);

	error = device_add(&dev->dev);
	if (error)
		goto err_devres_free;

	path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
	pr_info("%s as %s\n",
		dev->name ? dev->name : "Unspecified device",
		path ? path : "N/A");
	kfree(path);

	error = mutex_lock_interruptible(&input_mutex);
	if (error)
		goto err_device_del;

	list_add_tail(&dev->node, &input_dev_list);

	list_for_each_entry(handler, &input_handler_list, node)
		input_attach_handler(dev, handler);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);

	if (dev->devres_managed) {
		dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n",
			__func__, dev_name(&dev->dev));
		devres_add(dev->dev.parent, devres);
	}
	return 0;

err_device_del:
	device_del(&dev->dev);
err_devres_free:
	devres_free(devres);
	return error;
}
EXPORT_SYMBOL(input_register_device);

/**
 * input_unregister_device - unregister previously registered device
 * @dev: device to be unregistered
 *
 * This function unregisters an input device. Once the device is
 * unregistered the caller should not try to access it, as it may be
 * freed at any moment.
 */
void input_unregister_device(struct input_dev *dev)
{
	if (dev->devres_managed) {
		WARN_ON(devres_destroy(dev->dev.parent,
					devm_input_device_unregister,
					devm_input_device_match,
					dev));
		__input_unregister_device(dev);
		/*
		 * We do not do input_put_device() here because it will be
		 * done when the second devres entry fires.
		 */
	} else {
		__input_unregister_device(dev);
		input_put_device(dev);
	}
}
EXPORT_SYMBOL(input_unregister_device);

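/*
 * Illustrative sketch (not part of the core): ownership rules around
 * registration. Before input_register_device() succeeds, errors are handled
 * with input_free_device(); after it succeeds, only input_unregister_device()
 * may be used and the memory is released when the last reference is dropped.
 *
 *	error = input_register_device(input);
 *	if (error) {
 *		input_free_device(input);	// not yet registered
 *		return error;
 *	}
 *
 *	// later, in the driver's remove/teardown path:
 *	input_unregister_device(input);		// do not call input_free_device()
 */
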
/*
 * An implementation of input_handler's events() method that simply
 * invokes handler->event() method for each event one by one.
 */
static unsigned int input_handler_events_default(struct input_handle *handle,
						 struct input_value *vals,
						 unsigned int count)
{
	struct input_handler *handler = handle->handler;
	struct input_value *v;

	for (v = vals; v != vals + count; v++)
		handler->event(handle, v->type, v->code, v->value);

	return count;
}

/*
 * An implementation of input_handler's events() method that invokes
 * handler->filter() method for each event one by one and removes events
 * that were filtered out from the "vals" array.
 */
static unsigned int input_handler_events_filter(struct input_handle *handle,
						struct input_value *vals,
						unsigned int count)
{
	struct input_handler *handler = handle->handler;
	struct input_value *end = vals;
	struct input_value *v;

	for (v = vals; v != vals + count; v++) {
		if (handler->filter(handle, v->type, v->code, v->value))
			continue;
		if (end != v)
			*end = *v;
		end++;
	}

	return end - vals;
}

/*
 * An implementation of input_handler's events() method that does nothing.
 */
static unsigned int input_handler_events_null(struct input_handle *handle,
					      struct input_value *vals,
					      unsigned int count)
{
	return count;
}

/**
 * input_register_handler - register a new input handler
 * @handler: handler to be registered
 *
 * This function registers a new input handler (interface) for input
 * devices in the system and attaches it to all input devices that
 * are compatible with the handler.
 */
int input_register_handler(struct input_handler *handler)
{
	struct input_dev *dev;
	int error;

	error = input_handler_check_methods(handler);
	if (error)
		return error;

	INIT_LIST_HEAD(&handler->h_list);

	if (handler->filter)
		handler->events = input_handler_events_filter;
	else if (handler->event)
		handler->events = input_handler_events_default;
	else if (!handler->events)
		handler->events = input_handler_events_null;

	error = mutex_lock_interruptible(&input_mutex);
	if (error)
		return error;

	list_add_tail(&handler->node, &input_handler_list);

	list_for_each_entry(dev, &input_dev_list, node)
		input_attach_handler(dev, handler);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);
	return 0;
}
EXPORT_SYMBOL(input_register_handler);

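/*
 * Illustrative sketch (not part of the core): the smallest useful handler
 * provides one of event()/events()/filter(), plus connect()/disconnect()
 * and an id_table, and then calls input_register_handler(). The foo_*
 * names are hypothetical; see evdev.c or joydev.c for complete handlers.
 *
 *	static void foo_event(struct input_handle *handle,
 *			      unsigned int type, unsigned int code, int value)
 *	{
 *		pr_debug("foo: %s: type %u code %u value %d\n",
 *			 handle->dev->name, type, code, value);
 *	}
 *
 *	static const struct input_device_id foo_ids[] = {
 *		{ .driver_info = 1 },	// matches all input devices
 *		{ },			// terminating zero entry
 *	};
 *
 *	static struct input_handler foo_handler = {
 *		.event		= foo_event,
 *		.connect	= foo_connect,	// sketched near input_register_handle() below
 *		.disconnect	= foo_disconnect,
 *		.name		= "foo",
 *		.id_table	= foo_ids,
 *	};
 *
 *	error = input_register_handler(&foo_handler);
 */
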
/**
 * input_unregister_handler - unregisters an input handler
 * @handler: handler to be unregistered
 *
 * This function disconnects a handler from its input devices and
 * removes it from lists of known handlers.
 */
void input_unregister_handler(struct input_handler *handler)
{
	struct input_handle *handle, *next;

	mutex_lock(&input_mutex);

	list_for_each_entry_safe(handle, next, &handler->h_list, h_node)
		handler->disconnect(handle);
	WARN_ON(!list_empty(&handler->h_list));

	list_del_init(&handler->node);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);
}
EXPORT_SYMBOL(input_unregister_handler);

/**
 * input_handler_for_each_handle - handle iterator
 * @handler: input handler to iterate
 * @data: data for the callback
 * @fn: function to be called for each handle
 *
 * Iterate over @handler's list of handles and call @fn for each, passing
 * it @data, stopping when @fn returns a non-zero value. The function uses
 * RCU to traverse the list and therefore may be used in atomic contexts.
 * The @fn callback is invoked from within an RCU critical section and
 * thus must not sleep.
 */
int input_handler_for_each_handle(struct input_handler *handler, void *data,
				  int (*fn)(struct input_handle *, void *))
{
	struct input_handle *handle;
	int retval = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(handle, &handler->h_list, h_node) {
		retval = fn(handle, data);
		if (retval)
			break;
	}

	rcu_read_unlock();

	return retval;
}
EXPORT_SYMBOL(input_handler_for_each_handle);

/**
 * input_register_handle - register a new input handle
 * @handle: handle to register
 *
 * This function puts a new input handle onto the device's
 * and handler's lists so that events can flow through
 * it once it is opened using input_open_device().
 *
 * This function is supposed to be called from the handler's
 * connect() method.
 */
int input_register_handle(struct input_handle *handle)
{
	struct input_handler *handler = handle->handler;
	struct input_dev *dev = handle->dev;
	int error;

	/*
	 * We take dev->mutex here to prevent race with
	 * input_release_device().
	 */
	error = mutex_lock_interruptible(&dev->mutex);
	if (error)
		return error;

	/*
	 * Filters go to the head of the list, normal handlers
	 * to the tail.
	 */
	if (handler->filter)
		list_add_rcu(&handle->d_node, &dev->h_list);
	else
		list_add_tail_rcu(&handle->d_node, &dev->h_list);

	mutex_unlock(&dev->mutex);

	/*
	 * Since we are supposed to be called from ->connect()
	 * which is mutually exclusive with ->disconnect()
	 * we can't be racing with input_unregister_handle()
	 * and so separate lock is not needed here.
	 */
	list_add_tail_rcu(&handle->h_node, &handler->h_list);

	if (handler->start)
		handler->start(handle);

	return 0;
}
EXPORT_SYMBOL(input_register_handle);

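/*
 * Illustrative sketch (not part of the core): a handler's connect() method
 * typically allocates a handle, registers it and opens the device;
 * disconnect() undoes this in reverse order. The foo_* names are
 * hypothetical and details vary between handlers.
 *
 *	static int foo_connect(struct input_handler *handler, struct input_dev *dev,
 *			       const struct input_device_id *id)
 *	{
 *		struct input_handle *handle;
 *		int error;
 *
 *		handle = kzalloc(sizeof(*handle), GFP_KERNEL);
 *		if (!handle)
 *			return -ENOMEM;
 *
 *		handle->dev = dev;
 *		handle->handler = handler;
 *		handle->name = "foo";
 *
 *		error = input_register_handle(handle);
 *		if (error)
 *			goto err_free;
 *
 *		error = input_open_device(handle);
 *		if (error)
 *			goto err_unregister;
 *
 *		return 0;
 *
 *	 err_unregister:
 *		input_unregister_handle(handle);
 *	 err_free:
 *		kfree(handle);
 *		return error;
 *	}
 *
 *	static void foo_disconnect(struct input_handle *handle)
 *	{
 *		input_close_device(handle);
 *		input_unregister_handle(handle);
 *		kfree(handle);
 *	}
 */
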
/**
 * input_unregister_handle - unregister an input handle
 * @handle: handle to unregister
 *
 * This function removes the input handle from the device's
 * and handler's lists.
 *
 * This function is supposed to be called from the handler's
 * disconnect() method.
 */
void input_unregister_handle(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	list_del_rcu(&handle->h_node);

	/*
	 * Take dev->mutex to prevent race with input_release_device().
	 */
	mutex_lock(&dev->mutex);
	list_del_rcu(&handle->d_node);
	mutex_unlock(&dev->mutex);

	synchronize_rcu();
}
EXPORT_SYMBOL(input_unregister_handle);

/**
 * input_get_new_minor - allocates a new input minor number
 * @legacy_base: beginning of the legacy range to be searched
 * @legacy_num: size of legacy range
 * @allow_dynamic: whether we can also take an ID from the dynamic range
 *
 * This function allocates a new device minor from the input major
 * namespace. The caller can request a legacy minor by specifying the
 * @legacy_base and @legacy_num parameters and indicate whether an ID may
 * be allocated from the dynamic range if there are no free IDs in the
 * legacy range.
 */
int input_get_new_minor(int legacy_base, unsigned int legacy_num,
			bool allow_dynamic)
{
	/*
	 * This function should be called from input handler's ->connect()
	 * methods, which are serialized with input_mutex, so no additional
	 * locking is needed here.
	 */
	if (legacy_base >= 0) {
		int minor = ida_alloc_range(&input_ida, legacy_base,
					    legacy_base + legacy_num - 1,
					    GFP_KERNEL);
		if (minor >= 0 || !allow_dynamic)
			return minor;
	}

	return ida_alloc_range(&input_ida, INPUT_FIRST_DYNAMIC_DEV,
			       INPUT_MAX_CHAR_DEVICES - 1, GFP_KERNEL);
}
EXPORT_SYMBOL(input_get_new_minor);

/**
 * input_free_minor - release previously allocated minor
 * @minor: minor to be released
 *
 * This function releases a previously allocated input minor so that it
 * can be reused later.
 */
void input_free_minor(unsigned int minor)
{
	ida_free(&input_ida, minor);
}
EXPORT_SYMBOL(input_free_minor);

static int __init input_init(void)
{
	int err;

	err = class_register(&input_class);
	if (err) {
		pr_err("unable to register input_dev class\n");
		return err;
	}

	err = input_proc_init();
	if (err)
		goto fail1;

	err = register_chrdev_region(MKDEV(INPUT_MAJOR, 0),
				     INPUT_MAX_CHAR_DEVICES, "input");
	if (err) {
		pr_err("unable to register char major %d\n", INPUT_MAJOR);
		goto fail2;
	}

	return 0;

 fail2:	input_proc_exit();
 fail1:	class_unregister(&input_class);
	return err;
}

static void __exit input_exit(void)
{
	input_proc_exit();
	unregister_chrdev_region(MKDEV(INPUT_MAJOR, 0),
				 INPUT_MAX_CHAR_DEVICES);
	class_unregister(&input_class);
}

subsys_initcall(input_init);
module_exit(input_exit);