// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <[email protected]>
 */

#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "core.h"
#include <trace/events/firewire.h>

#include "packet-header-definitions.h"

/*
 * ABI version history is documented in linux/firewire-cdev.h.
 */
#define FW_CDEV_KERNEL_VERSION				5
#define FW_CDEV_VERSION_EVENT_REQUEST2			4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END		4
#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW		5
#define FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP		6

struct client {
	u32 version;
	struct fw_device *device;

	spinlock_t lock;
	bool in_shutdown;
	struct idr resource_idr;
	struct list_head event_list;
	wait_queue_head_t wait;
	wait_queue_head_t tx_flush_wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;
	bool buffer_is_mapped;

	struct list_head phy_receiver_link;
	u64 phy_receiver_closure;

	struct list_head link;
	struct kref kref;
};

static inline void client_get(struct client *client)
{
	kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
	struct client *client = container_of(kref, struct client, kref);

	fw_device_put(client->device);
	kfree(client);
}

static void client_put(struct client *client)
{
	kref_put(&client->kref, client_release);
}

struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);
struct client_resource {
	client_resource_release_fn_t release;
	int handle;
};

struct address_handler_resource {
	struct client_resource resource;
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
};

struct outbound_transaction_resource {
	struct client_resource resource;
	struct fw_transaction transaction;
};

struct inbound_transaction_resource {
	struct client_resource resource;
	struct fw_card *card;
	struct fw_request *request;
	bool is_fcp;
	void *data;
	size_t length;
};

struct descriptor_resource {
	struct client_resource resource;
	struct fw_descriptor descriptor;
	u32 data[];
};

struct iso_resource {
	struct client_resource resource;
	struct client *client;
	/* Schedule work and access todo only with client->lock held. */
	struct delayed_work work;
	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
	int generation;
	u64 channels;
	s32 bandwidth;
	struct iso_resource_event *e_alloc, *e_dealloc;
};

static void release_iso_resource(struct client *, struct client_resource *);

static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
	client_get(r->client);
	if (!queue_delayed_work(fw_workqueue, &r->work, delay))
		client_put(r->client);
}

static void schedule_if_iso_resource(struct client_resource *resource)
{
	if (resource->release == release_iso_resource)
		schedule_iso_resource(container_of(resource,
					struct iso_resource, resource), 0);
}

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct bus_reset_event {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
	struct event event;
	struct client *client;
	struct outbound_transaction_resource r;
	union {
		struct fw_cdev_event_response without_tstamp;
		struct fw_cdev_event_response2 with_tstamp;
	} rsp;
};

struct inbound_transaction_event {
	struct event event;
	union {
		struct fw_cdev_event_request request;
		struct fw_cdev_event_request2 request2;
		struct fw_cdev_event_request3 with_tstamp;
	} req;
};

struct iso_interrupt_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_interrupt_mc_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt_mc interrupt;
};

struct iso_resource_event {
	struct event event;
	struct fw_cdev_event_iso_resource iso_resource;
};

struct outbound_phy_packet_event {
	struct event event;
	struct client *client;
	struct fw_packet p;
	union {
		struct fw_cdev_event_phy_packet without_tstamp;
		struct fw_cdev_event_phy_packet2 with_tstamp;
	} phy_packet;
};

struct inbound_phy_packet_event {
	struct event event;
	union {
		struct fw_cdev_event_phy_packet without_tstamp;
		struct fw_cdev_event_phy_packet2 with_tstamp;
	} phy_packet;
};

#ifdef CONFIG_COMPAT
static void __user *u64_to_uptr(u64 value)
{
	if (in_compat_syscall())
		return compat_ptr(value);
	else
		return (void __user *)(unsigned long)value;
}

static u64 uptr_to_u64(void __user *ptr)
{
	if (in_compat_syscall())
		return ptr_to_compat(ptr);
	else
		return (u64)(unsigned long)ptr;
}
#else
static inline void __user *u64_to_uptr(u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline u64 uptr_to_u64(void __user *ptr)
{
	return (u64)(unsigned long)ptr;
}
#endif /* CONFIG_COMPAT */
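
/*
 * Userspace sketch (illustrative, not part of this driver): the cdev ABI
 * passes all pointers in __u64 fields, so a portable client zero-extends
 * through uintptr_t instead of casting a pointer straight to __u64.  The
 * buffer name below is hypothetical.
 *
 *	__u32 rom[256];
 *	struct fw_cdev_get_info info = {0};
 *
 *	info.rom = (__u64)(uintptr_t)rom;	// not (__u64)rom
 *	info.rom_length = sizeof(rom);
 */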

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = fw_device_get_by_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);
		return -ENOMEM;
	}

	client->device = device;
	spin_lock_init(&client->lock);
	idr_init(&client->resource_idr);
	INIT_LIST_HEAD(&client->event_list);
	init_waitqueue_head(&client->wait);
	init_waitqueue_head(&client->tx_flush_wait);
	INIT_LIST_HEAD(&client->phy_receiver_link);
	INIT_LIST_HEAD(&client->link);
	kref_init(&client->kref);

	file->private_data = client;

	return nonseekable_open(inode, file);
}

static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	scoped_guard(spinlock_irqsave, &client->lock) {
		if (client->in_shutdown)
			kfree(event);
		else
			list_add_tail(&event->link, &client->event_list);
	}

	wake_up_interruptible(&client->wait);
}

static int dequeue_event(struct client *client,
			 char __user *buffer, size_t count)
{
	struct event *event;
	size_t size, total;
	int i, ret;

	ret = wait_event_interruptible(client->wait,
			!list_empty(&client->event_list) ||
			fw_device_is_shutdown(client->device));
	if (ret < 0)
		return ret;

	if (list_empty(&client->event_list) &&
	    fw_device_is_shutdown(client->device))
		return -ENODEV;

	scoped_guard(spinlock_irq, &client->lock) {
		event = list_first_entry(&client->event_list, struct event, link);
		list_del(&event->link);
	}

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			ret = -EFAULT;
			goto out;
		}
		total += size;
	}
	ret = total;

 out:
	kfree(event);

	return ret;
}

static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
				 struct client *client)
{
	struct fw_card *card = client->device->card;

	guard(spinlock_irq)(&card->lock);

	event->closure = client->bus_reset_closure;
	event->type = FW_CDEV_EVENT_BUS_RESET;
	event->generation = client->device->generation;
	event->node_id = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id = card->bm_node_id;
	event->irm_node_id = card->irm_node->node_id;
	event->root_node_id = card->root_node->node_id;
}

static void for_each_client(struct fw_device *device,
			    void (*callback)(struct client *client))
{
	struct client *c;

	guard(mutex)(&device->client_list_mutex);

	list_for_each_entry(c, &device->client_list, link)
		callback(c);
}

static int schedule_reallocations(int id, void *p, void *data)
{
	schedule_if_iso_resource(p);

	return 0;
}

static void queue_bus_reset_event(struct client *client)
{
	struct bus_reset_event *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return;

	fill_bus_reset_event(&e->reset, client);

	queue_event(client, &e->event,
		    &e->reset, sizeof(e->reset), NULL, 0);

	guard(spinlock_irq)(&client->lock);

	idr_for_each(&client->resource_idr, schedule_reallocations, client);
}

void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}

union ioctl_arg {
	struct fw_cdev_get_info get_info;
	struct fw_cdev_send_request send_request;
	struct fw_cdev_allocate allocate;
	struct fw_cdev_deallocate deallocate;
	struct fw_cdev_send_response send_response;
	struct fw_cdev_initiate_bus_reset initiate_bus_reset;
	struct fw_cdev_add_descriptor add_descriptor;
	struct fw_cdev_remove_descriptor remove_descriptor;
	struct fw_cdev_create_iso_context create_iso_context;
	struct fw_cdev_queue_iso queue_iso;
	struct fw_cdev_start_iso start_iso;
	struct fw_cdev_stop_iso stop_iso;
	struct fw_cdev_get_cycle_timer get_cycle_timer;
	struct fw_cdev_allocate_iso_resource allocate_iso_resource;
	struct fw_cdev_send_stream_packet send_stream_packet;
	struct fw_cdev_get_cycle_timer2 get_cycle_timer2;
	struct fw_cdev_send_phy_packet send_phy_packet;
	struct fw_cdev_receive_phy_packets receive_phy_packets;
	struct fw_cdev_set_iso_channels set_iso_channels;
	struct fw_cdev_flush_iso flush_iso;
};

static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_info *a = &arg->get_info;
	struct fw_cdev_event_bus_reset bus_reset;
	unsigned long ret = 0;

	client->version = a->version;
	a->version = FW_CDEV_KERNEL_VERSION;
	a->card = client->device->card->index;

	scoped_guard(rwsem_read, &fw_device_rwsem) {
		if (a->rom != 0) {
			size_t want = a->rom_length;
			size_t have = client->device->config_rom_length * 4;

			ret = copy_to_user(u64_to_uptr(a->rom), client->device->config_rom,
					   min(want, have));
			if (ret != 0)
				return -EFAULT;
		}
		a->rom_length = client->device->config_rom_length * 4;
	}

	guard(mutex)(&client->device->client_list_mutex);

	client->bus_reset_closure = a->bus_reset_closure;
	if (a->bus_reset != 0) {
		fill_bus_reset_event(&bus_reset, client);
		/* unaligned size of bus_reset is 36 bytes */
		ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
	}
	if (ret == 0 && list_empty(&client->link))
		list_add_tail(&client->link, &client->device->client_list);

	return ret ? -EFAULT : 0;
}
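
/*
 * Userspace sketch (illustrative, not part of this driver): a client
 * typically negotiates the ABI version first.  The version number 4 below
 * is an assumption standing in for whatever ABI level the client actually
 * implements.
 *
 *	struct fw_cdev_get_info info = {0};
 *	struct fw_cdev_event_bus_reset reset;
 *
 *	info.version = 4;			// ABI level the client speaks
 *	info.bus_reset = (__u64)(uintptr_t)&reset;
 *	if (ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) < 0)
 *		err(1, "FW_CDEV_IOC_GET_INFO");
 *	// info.version now holds FW_CDEV_KERNEL_VERSION and
 *	// info.rom_length the size of the device's config ROM.
 */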

static int add_client_resource(struct client *client,
			       struct client_resource *resource, gfp_t gfp_mask)
{
	bool preload = gfpflags_allow_blocking(gfp_mask);
	int ret;

	if (preload)
		idr_preload(gfp_mask);

	scoped_guard(spinlock_irqsave, &client->lock) {
		if (client->in_shutdown)
			ret = -ECANCELED;
		else
			ret = idr_alloc(&client->resource_idr, resource, 0, 0, GFP_NOWAIT);
		if (ret >= 0) {
			resource->handle = ret;
			client_get(client);
			schedule_if_iso_resource(resource);
		}
	}

	if (preload)
		idr_preload_end();

	return ret < 0 ? ret : 0;
}

static int release_client_resource(struct client *client, u32 handle,
				   client_resource_release_fn_t release,
				   struct client_resource **return_resource)
{
	struct client_resource *resource;

	scoped_guard(spinlock_irq, &client->lock) {
		if (client->in_shutdown)
			return -EINVAL;

		resource = idr_find(&client->resource_idr, handle);
		if (!resource || resource->release != release)
			return -EINVAL;

		idr_remove(&client->resource_idr, handle);
	}

	if (return_resource)
		*return_resource = resource;
	else
		resource->release(client, resource);

	client_put(client);

	return 0;
}

static void release_transaction(struct client *client,
				struct client_resource *resource)
{
}

static void complete_transaction(struct fw_card *card, int rcode, u32 request_tstamp,
				 u32 response_tstamp, void *payload, size_t length, void *data)
{
	struct outbound_transaction_event *e = data;
	struct client *client = e->client;

	scoped_guard(spinlock_irqsave, &client->lock) {
		idr_remove(&client->resource_idr, e->r.resource.handle);
		if (client->in_shutdown)
			wake_up(&client->tx_flush_wait);
	}

	switch (e->rsp.without_tstamp.type) {
	case FW_CDEV_EVENT_RESPONSE:
	{
		struct fw_cdev_event_response *rsp = &e->rsp.without_tstamp;

		if (length < rsp->length)
			rsp->length = length;
		if (rcode == RCODE_COMPLETE)
			memcpy(rsp->data, payload, rsp->length);

		rsp->rcode = rcode;

		// In the case that sizeof(*rsp) doesn't align with the position of the
		// data, and the read is short, preserve an extra copy of the data
		// to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
		// for short reads and some apps depended on it, this is both safe
		// and prudent for compatibility.
		if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
			queue_event(client, &e->event, rsp, sizeof(*rsp), rsp->data, rsp->length);
		else
			queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0);

		break;
	}
	case FW_CDEV_EVENT_RESPONSE2:
	{
		struct fw_cdev_event_response2 *rsp = &e->rsp.with_tstamp;

		if (length < rsp->length)
			rsp->length = length;
		if (rcode == RCODE_COMPLETE)
			memcpy(rsp->data, payload, rsp->length);

		rsp->rcode = rcode;
		rsp->request_tstamp = request_tstamp;
		rsp->response_tstamp = response_tstamp;

		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0);

		break;
	}
	default:
		WARN_ON(1);
		break;
	}

	/* Drop the idr's reference */
	client_put(client);
}

static int init_request(struct client *client,
			struct fw_cdev_send_request *request,
			int destination_id, int speed)
{
	struct outbound_transaction_event *e;
	void *payload;
	int ret;

	if (request->tcode != TCODE_STREAM_DATA &&
	    (request->length > 4096 || request->length > 512 << speed))
		return -EIO;

	if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
	    request->length < 4)
		return -EINVAL;

	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;
	e->client = client;

	if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
		struct fw_cdev_event_response *rsp = &e->rsp.without_tstamp;

		rsp->type = FW_CDEV_EVENT_RESPONSE;
		rsp->length = request->length;
		rsp->closure = request->closure;
		payload = rsp->data;
	} else {
		struct fw_cdev_event_response2 *rsp = &e->rsp.with_tstamp;

		rsp->type = FW_CDEV_EVENT_RESPONSE2;
		rsp->length = request->length;
		rsp->closure = request->closure;
		payload = rsp->data;
	}

	if (request->data && copy_from_user(payload, u64_to_uptr(request->data), request->length)) {
		ret = -EFAULT;
		goto failed;
	}

	e->r.resource.release = release_transaction;
	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
	if (ret < 0)
		goto failed;

	fw_send_request_with_tstamp(client->device->card, &e->r.transaction, request->tcode,
				    destination_id, request->generation, speed, request->offset,
				    payload, request->length, complete_transaction, e);
	return 0;

 failed:
	kfree(e);

	return ret;
}

static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
	switch (arg->send_request.tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_MASK_SWAP:
	case TCODE_LOCK_COMPARE_SWAP:
	case TCODE_LOCK_FETCH_ADD:
	case TCODE_LOCK_LITTLE_ADD:
	case TCODE_LOCK_BOUNDED_ADD:
	case TCODE_LOCK_WRAP_ADD:
	case TCODE_LOCK_VENDOR_DEPENDENT:
		break;
	default:
		return -EINVAL;
	}

	return init_request(client, &arg->send_request, client->device->node_id,
			    client->device->max_speed);
}

static void release_request(struct client *client,
			    struct client_resource *resource)
{
	struct inbound_transaction_resource *r = container_of(resource,
			struct inbound_transaction_resource, resource);

	if (r->is_fcp)
		fw_request_put(r->request);
	else
		fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);

	fw_card_put(r->card);
	kfree(r);
}
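
/*
 * Userspace sketch (illustrative, not part of this driver): an async
 * quadlet read through the send-request path above.  generation and
 * my_cookie are placeholders; the generation must come from the most
 * recent FW_CDEV_EVENT_BUS_RESET, and CSR_CYCLE_TIME is just an example
 * offset.
 *
 *	struct fw_cdev_send_request rq = {0};
 *
 *	rq.tcode = TCODE_READ_QUADLET_REQUEST;
 *	rq.offset = CSR_REGISTER_BASE + CSR_CYCLE_TIME;
 *	rq.length = 4;
 *	rq.generation = generation;
 *	rq.closure = my_cookie;
 *	if (ioctl(fd, FW_CDEV_IOC_SEND_REQUEST, &rq) < 0)
 *		err(1, "FW_CDEV_IOC_SEND_REQUEST");
 *	// The result arrives later as FW_CDEV_EVENT_RESPONSE via read().
 */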

static void handle_request(struct fw_card *card, struct fw_request *request,
			   int tcode, int destination, int source,
			   int generation, unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
{
	struct address_handler_resource *handler = callback_data;
	bool is_fcp = is_in_fcp_region(offset, length);
	struct inbound_transaction_resource *r;
	struct inbound_transaction_event *e;
	size_t event_size0;
	int ret;

	/* card may be different from handler->client->device->card */
	fw_card_get(card);

	// Extend the lifetime of data for request so that its payload is safely accessible in
	// the process context for the client.
	if (is_fcp)
		fw_request_get(request);

	r = kmalloc(sizeof(*r), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (r == NULL || e == NULL)
		goto failed;

	r->card = card;
	r->request = request;
	r->is_fcp = is_fcp;
	r->data = payload;
	r->length = length;

	r->resource.release = release_request;
	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
	if (ret < 0)
		goto failed;

	if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
		struct fw_cdev_event_request *req = &e->req.request;

		if (tcode & 0x10)
			tcode = TCODE_LOCK_REQUEST;

		req->type = FW_CDEV_EVENT_REQUEST;
		req->tcode = tcode;
		req->offset = offset;
		req->length = length;
		req->handle = r->resource.handle;
		req->closure = handler->closure;
		event_size0 = sizeof(*req);
	} else if (handler->client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
		struct fw_cdev_event_request2 *req = &e->req.request2;

		req->type = FW_CDEV_EVENT_REQUEST2;
		req->tcode = tcode;
		req->offset = offset;
		req->source_node_id = source;
		req->destination_node_id = destination;
		req->card = card->index;
		req->generation = generation;
		req->length = length;
		req->handle = r->resource.handle;
		req->closure = handler->closure;
		event_size0 = sizeof(*req);
	} else {
		struct fw_cdev_event_request3 *req = &e->req.with_tstamp;

		req->type = FW_CDEV_EVENT_REQUEST3;
		req->tcode = tcode;
		req->offset = offset;
		req->source_node_id = source;
		req->destination_node_id = destination;
		req->card = card->index;
		req->generation = generation;
		req->length = length;
		req->handle = r->resource.handle;
		req->closure = handler->closure;
		req->tstamp = fw_request_get_timestamp(request);
		event_size0 = sizeof(*req);
	}

	queue_event(handler->client, &e->event,
		    &e->req, event_size0, r->data, length);
	return;

 failed:
	kfree(r);
	kfree(e);

	if (!is_fcp)
		fw_send_response(card, request, RCODE_CONFLICT_ERROR);
	else
		fw_request_put(request);

	fw_card_put(card);
}

static void release_address_handler(struct client *client,
				    struct client_resource *resource)
{
	struct address_handler_resource *r =
		container_of(resource, struct address_handler_resource, resource);

	fw_core_remove_address_handler(&r->handler);
	kfree(r);
}

static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_allocate *a = &arg->allocate;
	struct address_handler_resource *r;
	struct fw_address_region region;
	int ret;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	region.start = a->offset;
	if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
		region.end = a->offset + a->length;
	else
		region.end = a->region_end;

	r->handler.length = a->length;
	r->handler.address_callback = handle_request;
	r->handler.callback_data = r;
	r->closure = a->closure;
	r->client = client;

	ret = fw_core_add_address_handler(&r->handler, &region);
	if (ret < 0) {
		kfree(r);
		return ret;
	}
	a->offset = r->handler.offset;

	r->resource.release = release_address_handler;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &r->resource);
		return ret;
	}
	a->handle = r->resource.handle;

	return 0;
}

static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->deallocate.handle,
				       release_address_handler, NULL);
}

static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_response *a = &arg->send_response;
	struct client_resource *resource;
	struct inbound_transaction_resource *r;
	int ret = 0;

	if (release_client_resource(client, a->handle,
				    release_request, &resource) < 0)
		return -EINVAL;

	r = container_of(resource, struct inbound_transaction_resource,
			 resource);
	if (r->is_fcp) {
		fw_request_put(r->request);
		goto out;
	}

	if (a->length != fw_get_response_length(r->request)) {
		ret = -EINVAL;
		fw_request_put(r->request);
		goto out;
	}
	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
		ret = -EFAULT;
		fw_request_put(r->request);
		goto out;
	}
	fw_send_response(r->card, r->request, a->rcode);
 out:
	fw_card_put(r->card);
	kfree(r);

	return ret;
}

static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
	fw_schedule_bus_reset(client->device->card, true,
			      arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
	return 0;
}

static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor_resource *r =
		container_of(resource, struct descriptor_resource, resource);

	fw_core_remove_descriptor(&r->descriptor);
	kfree(r);
}
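
/*
 * Userspace sketch (illustrative, not part of this driver): serving an
 * address range via the allocate/send-response handlers above.  The
 * offset is just an example; my_cookie is a placeholder.
 *
 *	struct fw_cdev_allocate alloc = {0};
 *	struct fw_cdev_send_response resp = {0};
 *
 *	alloc.offset = 0xfffff0010000ULL;
 *	alloc.length = 0x100;
 *	alloc.region_end = alloc.offset + alloc.length;	// for ABI >= 4
 *	alloc.closure = my_cookie;
 *	ioctl(fd, FW_CDEV_IOC_ALLOCATE, &alloc);
 *
 *	// ... after read() delivers FW_CDEV_EVENT_REQUEST2 in ev:
 *	resp.rcode = RCODE_COMPLETE;
 *	resp.handle = ev.request2.handle;
 *	ioctl(fd, FW_CDEV_IOC_SEND_RESPONSE, &resp);
 */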

static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
	struct descriptor_resource *r;
	int ret;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	if (a->length > 256)
		return -EINVAL;

	r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
		ret = -EFAULT;
		goto failed;
	}

	r->descriptor.length = a->length;
	r->descriptor.immediate = a->immediate;
	r->descriptor.key = a->key;
	r->descriptor.data = r->data;

	ret = fw_core_add_descriptor(&r->descriptor);
	if (ret < 0)
		goto failed;

	r->resource.release = release_descriptor;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&r->descriptor);
		goto failed;
	}
	a->handle = r->resource.handle;

	return 0;
 failed:
	kfree(r);

	return ret;
}

static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->remove_descriptor.handle,
				       release_descriptor, NULL);
}

static void iso_callback(struct fw_iso_context *context, u32 cycle,
			 size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt_event *e;

	e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
	if (e == NULL)
		return;

	e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
	e->interrupt.closure = client->iso_closure;
	e->interrupt.cycle = cycle;
	e->interrupt.header_length = header_length;
	memcpy(e->interrupt.header, header, header_length);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt) + header_length, NULL, 0);
}

static void iso_mc_callback(struct fw_iso_context *context,
			    dma_addr_t completed, void *data)
{
	struct client *client = data;
	struct iso_interrupt_mc_event *e;

	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (e == NULL)
		return;

	e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
	e->interrupt.closure = client->iso_closure;
	e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
						      completed);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt), NULL, 0);
}

static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
{
	if (context->type == FW_ISO_CONTEXT_TRANSMIT)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static struct fw_iso_context *fw_iso_mc_context_create(struct fw_card *card,
						       fw_iso_mc_callback_t callback,
						       void *callback_data)
{
	struct fw_iso_context *ctx;

	ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL,
				    0, 0, 0, NULL, callback_data);
	if (!IS_ERR(ctx))
		ctx->callback.mc = callback;

	return ctx;
}

static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
	struct fw_iso_context *context;
	union fw_iso_callback cb;
	int ret;

	BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
					FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);

	switch (a->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		if (a->speed > SCODE_3200 || a->channel > 63)
			return -EINVAL;

		cb.sc = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		if (a->header_size < 4 || (a->header_size & 3) ||
		    a->channel > 63)
			return -EINVAL;

		cb.sc = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		cb.mc = iso_mc_callback;
		break;

	default:
		return -EINVAL;
	}

	if (a->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
		context = fw_iso_mc_context_create(client->device->card, cb.mc,
						   client);
	else
		context = fw_iso_context_create(client->device->card, a->type,
						a->channel, a->speed,
						a->header_size, cb.sc, client);
	if (IS_ERR(context))
		return PTR_ERR(context);
	if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
		context->drop_overflow_headers = true;

	// We only support one context at this time.
	guard(spinlock_irq)(&client->lock);

	if (client->iso_context != NULL) {
		fw_iso_context_destroy(context);

		return -EBUSY;
	}
	if (!client->buffer_is_mapped) {
		ret = fw_iso_buffer_map_dma(&client->buffer,
					    client->device->card,
					    iso_dma_direction(context));
		if (ret < 0) {
			fw_iso_context_destroy(context);

			return ret;
		}
		client->buffer_is_mapped = true;
	}
	client->iso_closure = a->closure;
	client->iso_context = context;

	a->handle = 0;

	return 0;
}

static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
	struct fw_iso_context *ctx = client->iso_context;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_set_channels(ctx, &a->channels);
}

/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
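
/*
 * Userspace sketch (illustrative, not part of this driver): the matching
 * encode side lives in linux/firewire-cdev.h.  A transmit packet with an
 * 8-byte user header and a completion interrupt would be set up roughly
 * like this:
 *
 *	p->control = FW_CDEV_ISO_PAYLOAD_LENGTH(len)
 *		     | FW_CDEV_ISO_HEADER_LENGTH(8)
 *		     | FW_CDEV_ISO_TAG(1)
 *		     | FW_CDEV_ISO_INTERRUPT;
 */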

static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_queue_iso *a = &arg->queue_iso;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, transmit_header_bytes = 0;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we setup the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the a->data pointer is ignored.
	 */
	payload = (unsigned long)a->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (a->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
		return -EINVAL;

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);

	end = (void __user *)p + a->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		switch (ctx->type) {
		case FW_ISO_CONTEXT_TRANSMIT:
			if (u.packet.header_length & 3)
				return -EINVAL;
			transmit_header_bytes = u.packet.header_length;
			break;

		case FW_ISO_CONTEXT_RECEIVE:
			if (u.packet.header_length == 0 ||
			    u.packet.header_length % ctx->header_size != 0)
				return -EINVAL;
			break;

		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
			if (u.packet.payload_length == 0 ||
			    u.packet.payload_length & 3)
				return -EINVAL;
			break;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[transmit_header_bytes / 4];
		if (next > end)
			return -EINVAL;
		if (copy_from_user
		    (u.packet.header, p->header, transmit_header_bytes))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}
	fw_iso_context_queue_flush(ctx);

	a->size -= uptr_to_u64(p) - a->packets;
	a->packets = uptr_to_u64(p);
	a->data = client->vm_start + payload;

	return count;
}
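
/*
 * Userspace sketch (illustrative, not part of this driver): queueing the
 * packets prepared above.  buf is the mmap()'ed iso buffer and pkts a
 * hypothetical array of struct fw_cdev_iso_packet already filled in.
 *
 *	struct fw_cdev_queue_iso q = {0};
 *
 *	q.packets = (__u64)(uintptr_t)pkts;
 *	q.size = sizeof(pkts);
 *	q.data = (__u64)(uintptr_t)buf;	// payload start inside the mmap()
 *	q.handle = 0;			// only one context is supported
 *	if (ioctl(fd, FW_CDEV_IOC_QUEUE_ISO, &q) < 0)
 *		err(1, "FW_CDEV_IOC_QUEUE_ISO");
 */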

static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_start_iso *a = &arg->start_iso;

	BUILD_BUG_ON(
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
	    (a->tags == 0 || a->tags > 15 || a->sync > 15))
		return -EINVAL;

	return fw_iso_context_start(client->iso_context,
				    a->cycle, a->sync, a->tags);
}

static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_stop_iso *a = &arg->stop_iso;

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}

static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_flush_iso *a = &arg->flush_iso;

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_flush_completions(client->iso_context);
}

static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
	struct fw_card *card = client->device->card;
	struct timespec64 ts = {0, 0};
	u32 cycle_time = 0;
	int ret;

	guard(irq)();

	ret = fw_card_read_cycle_time(card, &cycle_time);
	if (ret < 0)
		return ret;

	switch (a->clk_id) {
	case CLOCK_REALTIME:
		ktime_get_real_ts64(&ts);
		break;
	case CLOCK_MONOTONIC:
		ktime_get_ts64(&ts);
		break;
	case CLOCK_MONOTONIC_RAW:
		ktime_get_raw_ts64(&ts);
		break;
	default:
		return -EINVAL;
	}

	a->tv_sec = ts.tv_sec;
	a->tv_nsec = ts.tv_nsec;
	a->cycle_timer = cycle_time;

	return 0;
}

static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
	struct fw_cdev_get_cycle_timer2 ct2;

	ct2.clk_id = CLOCK_REALTIME;
	ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);

	a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
	a->cycle_timer = ct2.cycle_timer;

	return 0;
}
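
/*
 * Illustrative note (not part of this driver): the returned cycle_timer
 * follows the IEEE 1394 CYCLE_TIME register layout, so userspace can
 * decode it like this:
 *
 *	unsigned int sec    = (ct >> 25) & 0x7f;	// 0..127 seconds
 *	unsigned int cycle  = (ct >> 12) & 0x1fff;	// 0..7999, 125 us each
 *	unsigned int offset =  ct        & 0xfff;	// 0..3071, 24.576 MHz ticks
 */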

static void iso_resource_work(struct work_struct *work)
{
	struct iso_resource_event *e;
	struct iso_resource *r =
			container_of(work, struct iso_resource, work.work);
	struct client *client = r->client;
	int generation, channel, bandwidth, todo;
	bool skip, free, success;

	scoped_guard(spinlock_irq, &client->lock) {
		generation = client->device->generation;
		todo = r->todo;
		// Allow 1000ms grace period for other reallocations.
		if (todo == ISO_RES_ALLOC &&
		    time_before64(get_jiffies_64(), client->device->card->reset_jiffies + HZ)) {
			schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
			skip = true;
		} else {
			// We could be called twice within the same generation.
			skip = todo == ISO_RES_REALLOC &&
			       r->generation == generation;
		}
		free = todo == ISO_RES_DEALLOC ||
		       todo == ISO_RES_ALLOC_ONCE ||
		       todo == ISO_RES_DEALLOC_ONCE;
		r->generation = generation;
	}

	if (skip)
		goto out;

	bandwidth = r->bandwidth;

	fw_iso_resource_manage(client->device->card, generation,
			r->channels, &channel, &bandwidth,
			todo == ISO_RES_ALLOC ||
			todo == ISO_RES_REALLOC ||
			todo == ISO_RES_ALLOC_ONCE);
	/*
	 * Is this generation outdated already?  As long as this resource sticks
	 * in the idr, it will be scheduled again for a newer generation or at
	 * shutdown.
	 */
	if (channel == -EAGAIN &&
	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
		goto out;

	success = channel >= 0 || bandwidth > 0;

	scoped_guard(spinlock_irq, &client->lock) {
		// Transit from allocation to reallocation, except if the client
		// requested deallocation in the meantime.
		if (r->todo == ISO_RES_ALLOC)
			r->todo = ISO_RES_REALLOC;
		// Allocation or reallocation failure? Pull this resource out of the
		// idr and prepare for deletion, unless the client is shutting down.
		if (r->todo == ISO_RES_REALLOC && !success &&
		    !client->in_shutdown &&
		    idr_remove(&client->resource_idr, r->resource.handle)) {
			client_put(client);
			free = true;
		}
	}

	if (todo == ISO_RES_ALLOC && channel >= 0)
		r->channels = 1ULL << channel;

	if (todo == ISO_RES_REALLOC && success)
		goto out;

	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
		e = r->e_alloc;
		r->e_alloc = NULL;
	} else {
		e = r->e_dealloc;
		r->e_dealloc = NULL;
	}
	e->iso_resource.handle = r->resource.handle;
	e->iso_resource.channel = channel;
	e->iso_resource.bandwidth = bandwidth;

	queue_event(client, &e->event,
		    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);

	if (free) {
		cancel_delayed_work(&r->work);
		kfree(r->e_alloc);
		kfree(r->e_dealloc);
		kfree(r);
	}
 out:
	client_put(client);
}

static void release_iso_resource(struct client *client,
				 struct client_resource *resource)
{
	struct iso_resource *r =
		container_of(resource, struct iso_resource, resource);

	guard(spinlock_irq)(&client->lock);

	r->todo = ISO_RES_DEALLOC;
	schedule_iso_resource(r, 0);
}

static int init_iso_resource(struct client *client,
		struct fw_cdev_allocate_iso_resource *request, int todo)
{
	struct iso_resource_event *e1, *e2;
	struct iso_resource *r;
	int ret;

	if ((request->channels == 0 && request->bandwidth == 0) ||
	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
		return -EINVAL;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
	if (r == NULL || e1 == NULL || e2 == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_DELAYED_WORK(&r->work, iso_resource_work);
	r->client = client;
	r->todo = todo;
	r->generation = -1;
	r->channels = request->channels;
	r->bandwidth = request->bandwidth;
	r->e_alloc = e1;
	r->e_dealloc = e2;

	e1->iso_resource.closure = request->closure;
	e1->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
	e2->iso_resource.closure = request->closure;
	e2->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

	if (todo == ISO_RES_ALLOC) {
		r->resource.release = release_iso_resource;
		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
		if (ret < 0)
			goto fail;
	} else {
		r->resource.release = NULL;
		r->resource.handle = -1;
		schedule_iso_resource(r, 0);
	}
	request->handle = r->resource.handle;

	return 0;
 fail:
	kfree(r);
	kfree(e1);
	kfree(e2);

	return ret;
}

static int ioctl_allocate_iso_resource(struct client *client,
				       union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC);
}

static int ioctl_deallocate_iso_resource(struct client *client,
					 union ioctl_arg *arg)
{
	return release_client_resource(client,
			arg->deallocate.handle, release_iso_resource, NULL);
}

static int ioctl_allocate_iso_resource_once(struct client *client,
					    union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}
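
/*
 * Userspace sketch (illustrative, not part of this driver): requesting a
 * channel from a candidate mask plus bandwidth, then waiting for
 * FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED via read(); negative channel or
 * bandwidth fields in that event signal failure.  my_cookie and the
 * numbers are placeholders.
 *
 *	struct fw_cdev_allocate_iso_resource res = {0};
 *
 *	res.closure = my_cookie;
 *	res.channels = 1ULL << 5 | 1ULL << 6;	// acceptable channels
 *	res.bandwidth = 512;			// bandwidth allocation units
 *	if (ioctl(fd, FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE, &res) < 0)
 *		err(1, "FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE");
 */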

static int ioctl_deallocate_iso_resource_once(struct client *client,
					      union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}

/*
 * Returns a speed code: Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
	return client->device->max_speed;
}

static int ioctl_send_broadcast_request(struct client *client,
					union ioctl_arg *arg)
{
	struct fw_cdev_send_request *a = &arg->send_request;

	switch (a->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		break;
	default:
		return -EINVAL;
	}

	/* Security policy: Only allow accesses to Units Space. */
	if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
		return -EACCES;

	return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}

static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
	struct fw_cdev_send_request request;
	int dest;

	if (a->speed > client->device->card->link_speed ||
	    a->length > 1024 << a->speed)
		return -EIO;

	if (a->tag > 3 || a->channel > 63 || a->sy > 15)
		return -EINVAL;

	dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
	request.tcode = TCODE_STREAM_DATA;
	request.length = a->length;
	request.closure = a->closure;
	request.data = a->data;
	request.generation = a->generation;

	return init_request(client, &request, dest, a->speed);
}

static void outbound_phy_packet_callback(struct fw_packet *packet,
					 struct fw_card *card, int status)
{
	struct outbound_phy_packet_event *e =
		container_of(packet, struct outbound_phy_packet_event, p);
	struct client *e_client = e->client;
	u32 rcode;

	trace_async_phy_outbound_complete((uintptr_t)packet, card->index, status, packet->generation,
					  packet->timestamp);

	switch (status) {
	// expected:
	case ACK_COMPLETE:
		rcode = RCODE_COMPLETE;
		break;
	// should never happen with PHY packets:
	case ACK_PENDING:
		rcode = RCODE_COMPLETE;
		break;
	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:
		rcode = RCODE_BUSY;
		break;
	case ACK_DATA_ERROR:
		rcode = RCODE_DATA_ERROR;
		break;
	case ACK_TYPE_ERROR:
		rcode = RCODE_TYPE_ERROR;
		break;
	// stale generation; cancelled; on certain controllers: no ack
	default:
		rcode = status;
		break;
	}

	switch (e->phy_packet.without_tstamp.type) {
	case FW_CDEV_EVENT_PHY_PACKET_SENT:
	{
		struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp;

		pp->rcode = rcode;
		pp->data[0] = packet->timestamp;
		queue_event(e->client, &e->event, &e->phy_packet, sizeof(*pp) + pp->length,
			    NULL, 0);
		break;
	}
	case FW_CDEV_EVENT_PHY_PACKET_SENT2:
	{
		struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp;

		pp->rcode = rcode;
		pp->tstamp = packet->timestamp;
		queue_event(e->client, &e->event, &e->phy_packet, sizeof(*pp) + pp->length,
			    NULL, 0);
		break;
	}
	default:
		WARN_ON(1);
		break;
	}

	client_put(e_client);
}

static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
	struct fw_card *card = client->device->card;
	struct outbound_phy_packet_event *e;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	e = kzalloc(sizeof(*e) + sizeof(a->data), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	client_get(client);
	e->client = client;
	e->p.speed = SCODE_100;
	e->p.generation = a->generation;
	async_header_set_tcode(e->p.header, TCODE_LINK_INTERNAL);
	e->p.header[1] = a->data[0];
	e->p.header[2] = a->data[1];
	e->p.header_length = 12;
	e->p.callback = outbound_phy_packet_callback;

	if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
		struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp;

		pp->closure = a->closure;
		pp->type = FW_CDEV_EVENT_PHY_PACKET_SENT;
		if (is_ping_packet(a->data))
			pp->length = 4;
	} else {
		struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp;

		pp->closure = a->closure;
		pp->type = FW_CDEV_EVENT_PHY_PACKET_SENT2;
		// Keep the data field so that application can match the response event to the
		// request.
		pp->length = sizeof(a->data);
		memcpy(pp->data, a->data, sizeof(a->data));
	}

	trace_async_phy_outbound_initiate((uintptr_t)&e->p, card->index, e->p.generation,
					  e->p.header[1], e->p.header[2]);

	card->driver->send_request(card, &e->p);

	return 0;
}

static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
	struct fw_card *card = client->device->card;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	guard(spinlock_irq)(&card->lock);

	list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
	client->phy_receiver_closure = a->closure;

	return 0;
}

void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
	struct client *client;

	guard(spinlock_irqsave)(&card->lock);

	list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
		struct inbound_phy_packet_event *e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
		if (e == NULL)
			break;

		if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
			struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp;

			pp->closure = client->phy_receiver_closure;
			pp->type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
			pp->rcode = RCODE_COMPLETE;
			pp->length = 8;
			pp->data[0] = p->header[1];
			pp->data[1] = p->header[2];
			queue_event(client, &e->event, &e->phy_packet, sizeof(*pp) + 8, NULL, 0);
		} else {
			struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp;

			pp->closure = client->phy_receiver_closure;
			pp->type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED2;
			pp->rcode = RCODE_COMPLETE;
			pp->length = 8;
			pp->tstamp = p->timestamp;
			pp->data[0] = p->header[1];
			pp->data[1] = p->header[2];
			queue_event(client, &e->event, &e->phy_packet, sizeof(*pp) + 8, NULL, 0);
		}
	}
}

static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
	[0x00] = ioctl_get_info,
	[0x01] = ioctl_send_request,
	[0x02] = ioctl_allocate,
	[0x03] = ioctl_deallocate,
	[0x04] = ioctl_send_response,
	[0x05] = ioctl_initiate_bus_reset,
	[0x06] = ioctl_add_descriptor,
	[0x07] = ioctl_remove_descriptor,
	[0x08] = ioctl_create_iso_context,
	[0x09] = ioctl_queue_iso,
	[0x0a] = ioctl_start_iso,
	[0x0b] = ioctl_stop_iso,
	[0x0c] = ioctl_get_cycle_timer,
	[0x0d] = ioctl_allocate_iso_resource,
	[0x0e] = ioctl_deallocate_iso_resource,
	[0x0f] = ioctl_allocate_iso_resource_once,
	[0x10] = ioctl_deallocate_iso_resource_once,
	[0x11] = ioctl_get_speed,
	[0x12] = ioctl_send_broadcast_request,
	[0x13] = ioctl_send_stream_packet,
	[0x14] = ioctl_get_cycle_timer2,
	[0x15] = ioctl_send_phy_packet,
	[0x16] = ioctl_receive_phy_packets,
	[0x17] = ioctl_set_iso_channels,
	[0x18] = ioctl_flush_iso,
};

static int dispatch_ioctl(struct client *client,
			  unsigned int cmd, void __user *arg)
{
	union ioctl_arg buffer;
	int ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
	    _IOC_SIZE(cmd) > sizeof(buffer))
		return -ENOTTY;

	memset(&buffer, 0, sizeof(buffer));

	if (_IOC_DIR(cmd) & _IOC_WRITE)
		if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ)
		if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
			return -EFAULT;

	return ret;
}

static long fw_device_op_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}
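
/*
 * Illustrative note (not part of this driver): the _IOC_NR() indices in
 * ioctl_handlers[] mirror the FW_CDEV_IOC_* numbers in
 * linux/firewire-cdev.h, which all use the '#' type checked above, e.g.:
 *
 *	#define FW_CDEV_IOC_GET_INFO  _IOWR('#', 0x00, struct fw_cdev_get_info)
 *	#define FW_CDEV_IOC_FLUSH_ISO  _IOW('#', 0x18, struct fw_cdev_flush_iso)
 */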

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	ret = fw_iso_buffer_alloc(&client->buffer, page_count);
	if (ret < 0)
		return ret;

	scoped_guard(spinlock_irq, &client->lock) {
		if (client->iso_context) {
			ret = fw_iso_buffer_map_dma(&client->buffer, client->device->card,
						    iso_dma_direction(client->iso_context));
			if (ret < 0)
				goto fail;
			client->buffer_is_mapped = true;
		}
	}

	ret = vm_map_pages_zero(vma, client->buffer.pages,
				client->buffer.page_count);
	if (ret < 0)
		goto fail;

	return 0;
 fail:
	fw_iso_buffer_destroy(&client->buffer, client->device->card);
	return ret;
}

static int is_outbound_transaction_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;

	return resource->release == release_transaction;
}

static int has_outbound_transactions(struct client *client)
{
	guard(spinlock_irq)(&client->lock);

	return idr_for_each(&client->resource_idr, is_outbound_transaction_resource,
			    NULL);
}

static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;
	struct client *client = data;

	resource->release(client, resource);
	client_put(client);

	return 0;
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *event, *next_event;

	scoped_guard(spinlock_irq, &client->device->card->lock)
		list_del(&client->phy_receiver_link);

	scoped_guard(mutex, &client->device->client_list_mutex)
		list_del(&client->link);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	/* Freeze client->resource_idr and client->event_list */
	scoped_guard(spinlock_irq, &client->lock)
		client->in_shutdown = true;

	wait_event(client->tx_flush_wait, !has_outbound_transactions(client));

	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_destroy(&client->resource_idr);

	list_for_each_entry_safe(event, next_event, &client->event_list, link)
		kfree(event);

	client_put(client);

	return 0;
}

static __poll_t fw_device_op_poll(struct file *file, poll_table *pt)
{
	struct client *client = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= EPOLLHUP | EPOLLERR;
	if (!list_empty(&client->event_list))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
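
/*
 * Userspace sketch (illustrative, not part of this driver): the canonical
 * event loop against the file_operations below.  Events are drained with
 * one read() per event into the union of all fw_cdev_event_* types.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	union fw_cdev_event ev;
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & (POLLERR | POLLHUP))
 *			break;			// device went away
 *		if (read(fd, &ev, sizeof(ev)) < 0)
 *			break;
 *		switch (ev.common.type) {
 *		case FW_CDEV_EVENT_BUS_RESET:
 *			// generation = ev.bus_reset.generation; ...
 *			break;
 *		}
 *	}
 */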

const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.mmap		= fw_device_op_mmap,
	.release	= fw_device_op_release,
	.poll		= fw_device_op_poll,
	.compat_ioctl	= compat_ptr_ioctl,
};