xref: /linux-6.15/drivers/firewire/core-cdev.c (revision 2a6a58f0)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Char device for device raw access
4  *
5  * Copyright (C) 2005-2007  Kristian Hoegsberg <[email protected]>
6  */
7 
8 #include <linux/bug.h>
9 #include <linux/compat.h>
10 #include <linux/delay.h>
11 #include <linux/device.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/err.h>
14 #include <linux/errno.h>
15 #include <linux/firewire.h>
16 #include <linux/firewire-cdev.h>
17 #include <linux/idr.h>
18 #include <linux/irqflags.h>
19 #include <linux/jiffies.h>
20 #include <linux/kernel.h>
21 #include <linux/kref.h>
22 #include <linux/mm.h>
23 #include <linux/module.h>
24 #include <linux/mutex.h>
25 #include <linux/poll.h>
26 #include <linux/sched.h> /* required for linux/wait.h */
27 #include <linux/slab.h>
28 #include <linux/spinlock.h>
29 #include <linux/string.h>
30 #include <linux/time.h>
31 #include <linux/uaccess.h>
32 #include <linux/vmalloc.h>
33 #include <linux/wait.h>
34 #include <linux/workqueue.h>
35 
36 
37 #include "core.h"
38 #include <trace/events/firewire.h>
39 
40 #include "packet-header-definitions.h"
41 
42 /*
43  * ABI version history is documented in linux/firewire-cdev.h.
44  */
45 #define FW_CDEV_KERNEL_VERSION			5
46 #define FW_CDEV_VERSION_EVENT_REQUEST2		4
47 #define FW_CDEV_VERSION_ALLOCATE_REGION_END	4
48 #define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW	5
49 #define FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP	6
50 
/*
 * Per-open-file state for a /dev/fw* character device.  Allocated in
 * fw_device_op_open(); freed by the final client_put().
 */
struct client {
	u32 version;			/* ABI version set via ioctl_get_info() */
	struct fw_device *device;	/* holds a reference, dropped in client_release() */

	spinlock_t lock;		/* protects in_shutdown, resource_idr, event_list */
	bool in_shutdown;
	struct idr resource_idr;	/* userspace handles -> struct client_resource */
	struct list_head event_list;	/* events pending for read(2) */
	wait_queue_head_t wait;		/* readers sleep here until an event arrives */
	wait_queue_head_t tx_flush_wait; /* woken as outbound transactions complete in shutdown */
	u64 bus_reset_closure;

	/* At most one isochronous context per client (see ioctl_create_iso_context()). */
	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;	/* mmap()ed payload buffer */
	unsigned long vm_start;		/* user address where buffer is mapped */
	bool buffer_is_mapped;		/* DMA mapping is done lazily at context creation */

	struct list_head phy_receiver_link;
	u64 phy_receiver_closure;

	struct list_head link;		/* entry on device->client_list */
	struct kref kref;
};
75 
/* Take an additional reference on @client. */
static inline void client_get(struct client *client)
{
	kref_get(&client->kref);
}

/* kref release callback: drops the device reference and frees the client. */
static void client_release(struct kref *kref)
{
	struct client *client = container_of(kref, struct client, kref);

	fw_device_put(client->device);
	kfree(client);
}

/* Drop a reference; the client is freed when the last one goes away. */
static void client_put(struct client *client)
{
	kref_put(&client->kref, client_release);
}
93 
struct client_resource;
/* Undoes/frees a resource when its handle is released or at client shutdown. */
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);
/*
 * Base type embedded in every per-client resource tracked in
 * client->resource_idr.  The release function pointer doubles as a type
 * tag (see release_client_resource() and schedule_if_iso_resource()).
 */
struct client_resource {
	client_resource_release_fn_t release;
	int handle;		/* idr handle handed out to userspace */
};

/* An address range registered via ioctl_allocate(). */
struct address_handler_resource {
	struct client_resource resource;
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
};

/* An in-flight outbound transaction started by init_request(). */
struct outbound_transaction_resource {
	struct client_resource resource;
	struct fw_transaction transaction;
};

/* An inbound request awaiting userspace's ioctl_send_response(). */
struct inbound_transaction_resource {
	struct client_resource resource;
	struct fw_card *card;
	struct fw_request *request;
	bool is_fcp;		/* FCP requests need no response from us */
	void *data;
	size_t length;
};

/* A config ROM descriptor added via ioctl_add_descriptor(). */
struct descriptor_resource {
	struct client_resource resource;
	struct fw_descriptor descriptor;
	u32 data[];		/* descriptor payload, flexible array */
};

/* An isochronous channel/bandwidth allocation driven by delayed work. */
struct iso_resource {
	struct client_resource resource;
	struct client *client;
	/* Schedule work and access todo only with client->lock held. */
	struct delayed_work work;
	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
	int generation;
	u64 channels;
	s32 bandwidth;
	struct iso_resource_event *e_alloc, *e_dealloc;
};
141 
static void release_iso_resource(struct client *, struct client_resource *);

/*
 * Queue the iso resource state machine on fw_workqueue.  A client
 * reference is taken for the pending work item; it is dropped again
 * immediately if the work was already queued.
 */
static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
	client_get(r->client);
	if (!queue_delayed_work(fw_workqueue, &r->work, delay))
		client_put(r->client);
}

/* Kick the work item right away, but only for iso resources (type-tagged
 * by their release function). */
static void schedule_if_iso_resource(struct client_resource *resource)
{
	if (resource->release == release_iso_resource)
		schedule_iso_resource(container_of(resource,
					struct iso_resource, resource), 0);
}
157 
/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
	/* Up to two data chunks copied out to userspace by dequeue_event(). */
	struct { void *data; size_t size; } v[2];
	struct list_head link;		/* entry on client->event_list */
};

struct bus_reset_event {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
	struct event event;
	struct client *client;
	struct outbound_transaction_resource r;
	/* Layout depends on the negotiated ABI version, see init_request(). */
	union {
		struct fw_cdev_event_response without_tstamp;
		struct fw_cdev_event_response2 with_tstamp;
	} rsp;
};

struct inbound_transaction_event {
	struct event event;
	/* Layout depends on the negotiated ABI version, see handle_request(). */
	union {
		struct fw_cdev_event_request request;
		struct fw_cdev_event_request2 request2;
		struct fw_cdev_event_request3 with_tstamp;
	} req;
};

struct iso_interrupt_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_interrupt_mc_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt_mc interrupt;
};

struct iso_resource_event {
	struct event event;
	struct fw_cdev_event_iso_resource iso_resource;
};

struct outbound_phy_packet_event {
	struct event event;
	struct client *client;
	struct fw_packet p;
	union {
		struct fw_cdev_event_phy_packet without_tstamp;
		struct fw_cdev_event_phy_packet2 with_tstamp;
	} phy_packet;
};

struct inbound_phy_packet_event {
	struct event event;
	union {
		struct fw_cdev_event_phy_packet without_tstamp;
		struct fw_cdev_event_phy_packet2 with_tstamp;
	} phy_packet;
};
223 
#ifdef CONFIG_COMPAT
/*
 * Userspace pointers travel through the cdev ABI as u64.  For a compat
 * (32-bit) syscall they must be converted with compat_ptr()/ptr_to_compat()
 * so the upper bits are treated correctly.
 */
static void __user *u64_to_uptr(u64 value)
{
	if (in_compat_syscall())
		return compat_ptr(value);
	else
		return (void __user *)(unsigned long)value;
}

static u64 uptr_to_u64(void __user *ptr)
{
	if (in_compat_syscall())
		return ptr_to_compat(ptr);
	else
		return (u64)(unsigned long)ptr;
}
#else
static inline void __user *u64_to_uptr(u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline u64 uptr_to_u64(void __user *ptr)
{
	return (u64)(unsigned long)ptr;
}
#endif /* CONFIG_COMPAT */
251 
/*
 * open(2) handler: look up the fw_device by dev_t, allocate and initialize
 * the per-file struct client.  The device reference taken here is kept for
 * the client's lifetime (dropped in client_release()).
 */
static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = fw_device_get_by_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	/* Refuse devices that are already going away. */
	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);
		return -ENOMEM;
	}

	client->device = device;
	spin_lock_init(&client->lock);
	idr_init(&client->resource_idr);
	INIT_LIST_HEAD(&client->event_list);
	init_waitqueue_head(&client->wait);
	init_waitqueue_head(&client->tx_flush_wait);
	INIT_LIST_HEAD(&client->phy_receiver_link);
	INIT_LIST_HEAD(&client->link);
	kref_init(&client->kref);

	file->private_data = client;

	return nonseekable_open(inode, file);
}
286 
/*
 * Append @event (with up to two payload chunks) to the client's event list
 * and wake any sleeping reader.  If the client is already shutting down the
 * event is freed instead of queued.  Safe to call from atomic context.
 */
static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		kfree(event);
	else
		list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}
306 
/*
 * Block until an event is available (or the device disappears), then copy
 * as much of the oldest event as fits into @buffer and free it.  Returns
 * the number of bytes copied, or a negative errno (-ERESTARTSYS on signal,
 * -ENODEV on device shutdown, -EFAULT on copy failure).
 */
static int dequeue_event(struct client *client,
			 char __user *buffer, size_t count)
{
	struct event *event;
	size_t size, total;
	int i, ret;

	ret = wait_event_interruptible(client->wait,
			!list_empty(&client->event_list) ||
			fw_device_is_shutdown(client->device));
	if (ret < 0)
		return ret;

	/* Drain events queued before shutdown; report -ENODEV only when empty. */
	if (list_empty(&client->event_list) &&
		       fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irq(&client->lock);
	event = list_first_entry(&client->event_list, struct event, link);
	list_del(&event->link);
	spin_unlock_irq(&client->lock);

	/* An event is at most two chunks; a short read truncates silently. */
	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			ret = -EFAULT;
			goto out;
		}
		total += size;
	}
	ret = total;

 out:
	kfree(event);

	return ret;
}
345 
346 static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
347 				 size_t count, loff_t *offset)
348 {
349 	struct client *client = file->private_data;
350 
351 	return dequeue_event(client, buffer, count);
352 }
353 
/*
 * Snapshot the current bus topology into a userspace-visible bus reset
 * event.  card->lock is taken so node IDs and generation are read
 * consistently.
 */
static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
				 struct client *client)
{
	struct fw_card *card = client->device->card;

	spin_lock_irq(&card->lock);

	event->closure	     = client->bus_reset_closure;
	event->type          = FW_CDEV_EVENT_BUS_RESET;
	event->generation    = client->device->generation;
	event->node_id       = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id    = card->bm_node_id;
	event->irm_node_id   = card->irm_node->node_id;
	event->root_node_id  = card->root_node->node_id;

	spin_unlock_irq(&card->lock);
}
372 
/* Invoke @callback on every client of @device, under client_list_mutex. */
static void for_each_client(struct fw_device *device,
			    void (*callback)(struct client *client))
{
	struct client *c;

	guard(mutex)(&device->client_list_mutex);

	list_for_each_entry(c, &device->client_list, link)
		callback(c);
}

/* idr_for_each() callback: re-queue iso resources after a bus reset. */
static int schedule_reallocations(int id, void *p, void *data)
{
	schedule_if_iso_resource(p);

	return 0;
}
390 
/*
 * Post a bus reset event to one client and schedule reallocation of all of
 * its iso resources.  Allocation failure is silently ignored — the client
 * simply misses this bus reset notification.
 */
static void queue_bus_reset_event(struct client *client)
{
	struct bus_reset_event *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return;

	fill_bus_reset_event(&e->reset, client);

	queue_event(client, &e->event,
		    &e->reset, sizeof(e->reset), NULL, 0);

	spin_lock_irq(&client->lock);
	idr_for_each(&client->resource_idr, schedule_reallocations, client);
	spin_unlock_irq(&client->lock);
}
408 
/* Called by the core after a bus reset: notify every client of @device. */
void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

/* Wake a client's reader so it can observe fw_device_is_shutdown(). */
static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

/* Called by the core when @device goes away: unblock all readers. */
void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}
423 
/* In/out argument buffer shared by all ioctl handlers; sized by the
 * largest member, copied from/to userspace in the dispatcher. */
union ioctl_arg {
	struct fw_cdev_get_info			get_info;
	struct fw_cdev_send_request		send_request;
	struct fw_cdev_allocate			allocate;
	struct fw_cdev_deallocate		deallocate;
	struct fw_cdev_send_response		send_response;
	struct fw_cdev_initiate_bus_reset	initiate_bus_reset;
	struct fw_cdev_add_descriptor		add_descriptor;
	struct fw_cdev_remove_descriptor	remove_descriptor;
	struct fw_cdev_create_iso_context	create_iso_context;
	struct fw_cdev_queue_iso		queue_iso;
	struct fw_cdev_start_iso		start_iso;
	struct fw_cdev_stop_iso			stop_iso;
	struct fw_cdev_get_cycle_timer		get_cycle_timer;
	struct fw_cdev_allocate_iso_resource	allocate_iso_resource;
	struct fw_cdev_send_stream_packet	send_stream_packet;
	struct fw_cdev_get_cycle_timer2		get_cycle_timer2;
	struct fw_cdev_send_phy_packet		send_phy_packet;
	struct fw_cdev_receive_phy_packets	receive_phy_packets;
	struct fw_cdev_set_iso_channels		set_iso_channels;
	struct fw_cdev_flush_iso		flush_iso;
};
446 
/*
 * FW_CDEV_IOC_GET_INFO: negotiate the ABI version, optionally copy out the
 * config ROM and an initial bus reset event, and register the client on the
 * device's client list so it receives future bus reset events.
 */
static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_info *a = &arg->get_info;
	struct fw_cdev_event_bus_reset bus_reset;
	unsigned long ret = 0;

	client->version = a->version;
	a->version = FW_CDEV_KERNEL_VERSION;
	a->card = client->device->card->index;

	/* fw_device_rwsem keeps config_rom stable while we copy it. */
	scoped_guard(rwsem_read, &fw_device_rwsem) {
		if (a->rom != 0) {
			size_t want = a->rom_length;
			size_t have = client->device->config_rom_length * 4;

			ret = copy_to_user(u64_to_uptr(a->rom), client->device->config_rom,
					   min(want, have));
			if (ret != 0)
				return -EFAULT;
		}
		a->rom_length = client->device->config_rom_length * 4;
	}

	guard(mutex)(&client->device->client_list_mutex);

	client->bus_reset_closure = a->bus_reset_closure;
	if (a->bus_reset != 0) {
		fill_bus_reset_event(&bus_reset, client);
		/* unaligned size of bus_reset is 36 bytes */
		ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
	}
	if (ret == 0 && list_empty(&client->link))
		list_add_tail(&client->link, &client->device->client_list);

	return ret ? -EFAULT : 0;
}
483 
/*
 * Register @resource in the client's idr and assign its handle.  Takes a
 * client reference per registered resource.  Uses idr_preload() when the
 * caller may block so the idr_alloc() under the spinlock can be GFP_NOWAIT.
 * Returns 0 or a negative errno (-ECANCELED once shutdown has begun).
 */
static int add_client_resource(struct client *client,
			       struct client_resource *resource, gfp_t gfp_mask)
{
	bool preload = gfpflags_allow_blocking(gfp_mask);
	unsigned long flags;
	int ret;

	if (preload)
		idr_preload(gfp_mask);
	spin_lock_irqsave(&client->lock, flags);

	if (client->in_shutdown)
		ret = -ECANCELED;
	else
		ret = idr_alloc(&client->resource_idr, resource, 0, 0,
				GFP_NOWAIT);
	if (ret >= 0) {
		resource->handle = ret;
		client_get(client);
		/* Iso resources start their state machine immediately. */
		schedule_if_iso_resource(resource);
	}

	spin_unlock_irqrestore(&client->lock, flags);
	if (preload)
		idr_preload_end();

	return ret < 0 ? ret : 0;
}
512 
/*
 * Remove the resource with @handle from the idr, but only if its release
 * function matches @release (the type tag), so one ioctl cannot free a
 * resource of another kind.  If @return_resource is non-NULL, ownership is
 * handed to the caller instead of calling ->release().  Drops the idr's
 * client reference.  Returns 0 or -EINVAL.
 */
static int release_client_resource(struct client *client, u32 handle,
				   client_resource_release_fn_t release,
				   struct client_resource **return_resource)
{
	struct client_resource *resource;

	spin_lock_irq(&client->lock);
	if (client->in_shutdown)
		resource = NULL;
	else
		resource = idr_find(&client->resource_idr, handle);
	if (resource && resource->release == release)
		idr_remove(&client->resource_idr, handle);
	spin_unlock_irq(&client->lock);

	if (!(resource && resource->release == release))
		return -EINVAL;

	if (return_resource)
		*return_resource = resource;
	else
		resource->release(client, resource);

	client_put(client);

	return 0;
}
540 
/*
 * Intentionally empty: outbound transactions are cleaned up in
 * complete_transaction().  This function exists as the type tag that
 * identifies outbound_transaction_resource entries in the idr.
 */
static void release_transaction(struct client *client,
				struct client_resource *resource)
{
}
545 
546 static void complete_transaction(struct fw_card *card, int rcode, u32 request_tstamp,
547 				 u32 response_tstamp, void *payload, size_t length, void *data)
548 {
549 	struct outbound_transaction_event *e = data;
550 	struct client *client = e->client;
551 	unsigned long flags;
552 
553 	spin_lock_irqsave(&client->lock, flags);
554 	idr_remove(&client->resource_idr, e->r.resource.handle);
555 	if (client->in_shutdown)
556 		wake_up(&client->tx_flush_wait);
557 	spin_unlock_irqrestore(&client->lock, flags);
558 
559 	switch (e->rsp.without_tstamp.type) {
560 	case FW_CDEV_EVENT_RESPONSE:
561 	{
562 		struct fw_cdev_event_response *rsp = &e->rsp.without_tstamp;
563 
564 		if (length < rsp->length)
565 			rsp->length = length;
566 		if (rcode == RCODE_COMPLETE)
567 			memcpy(rsp->data, payload, rsp->length);
568 
569 		rsp->rcode = rcode;
570 
571 		// In the case that sizeof(*rsp) doesn't align with the position of the
572 		// data, and the read is short, preserve an extra copy of the data
573 		// to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
574 		// for short reads and some apps depended on it, this is both safe
575 		// and prudent for compatibility.
576 		if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
577 			queue_event(client, &e->event, rsp, sizeof(*rsp), rsp->data, rsp->length);
578 		else
579 			queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0);
580 
581 		break;
582 	}
583 	case FW_CDEV_EVENT_RESPONSE2:
584 	{
585 		struct fw_cdev_event_response2 *rsp = &e->rsp.with_tstamp;
586 
587 		if (length < rsp->length)
588 			rsp->length = length;
589 		if (rcode == RCODE_COMPLETE)
590 			memcpy(rsp->data, payload, rsp->length);
591 
592 		rsp->rcode = rcode;
593 		rsp->request_tstamp = request_tstamp;
594 		rsp->response_tstamp = response_tstamp;
595 
596 		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0);
597 
598 		break;
599 	default:
600 		WARN_ON(1);
601 		break;
602 	}
603 	}
604 
605 	/* Drop the idr's reference */
606 	client_put(client);
607 }
608 
/*
 * Common worker for the send-request ioctls: validate lengths, allocate the
 * response event (payload buffer included), copy in the outbound payload,
 * register the transaction as a client resource and fire it off.  The event
 * layout (with or without timestamps) depends on the client's ABI version.
 * Returns 0 or a negative errno.
 */
static int init_request(struct client *client,
			struct fw_cdev_send_request *request,
			int destination_id, int speed)
{
	struct outbound_transaction_event *e;
	void *payload;
	int ret;

	/* Cap payloads at 4096 bytes and at the max size for this speed. */
	if (request->tcode != TCODE_STREAM_DATA &&
	    (request->length > 4096 || request->length > 512 << speed))
		return -EIO;

	if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
	    request->length < 4)
		return -EINVAL;

	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;
	e->client = client;

	if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
		struct fw_cdev_event_response *rsp = &e->rsp.without_tstamp;

		rsp->type = FW_CDEV_EVENT_RESPONSE;
		rsp->length = request->length;
		rsp->closure = request->closure;
		payload = rsp->data;
	} else {
		struct fw_cdev_event_response2 *rsp = &e->rsp.with_tstamp;

		rsp->type = FW_CDEV_EVENT_RESPONSE2;
		rsp->length = request->length;
		rsp->closure = request->closure;
		payload = rsp->data;
	}

	if (request->data && copy_from_user(payload, u64_to_uptr(request->data), request->length)) {
		ret = -EFAULT;
		goto failed;
	}

	e->r.resource.release = release_transaction;
	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
	if (ret < 0)
		goto failed;

	/* From here on, e is owned by the transaction machinery and freed
	 * via complete_transaction(). */
	fw_send_request_with_tstamp(client->device->card, &e->r.transaction, request->tcode,
				    destination_id, request->generation, speed, request->offset,
				    payload, request->length, complete_transaction, e);
	return 0;

 failed:
	kfree(e);

	return ret;
}
666 
/*
 * FW_CDEV_IOC_SEND_REQUEST: whitelist the transaction codes userspace may
 * issue directly, then hand off to init_request() targeting this device.
 */
static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
	switch (arg->send_request.tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_MASK_SWAP:
	case TCODE_LOCK_COMPARE_SWAP:
	case TCODE_LOCK_FETCH_ADD:
	case TCODE_LOCK_LITTLE_ADD:
	case TCODE_LOCK_BOUNDED_ADD:
	case TCODE_LOCK_WRAP_ADD:
	case TCODE_LOCK_VENDOR_DEPENDENT:
		break;
	default:
		return -EINVAL;
	}

	return init_request(client, &arg->send_request, client->device->node_id,
			    client->device->max_speed);
}
689 
/*
 * Release an inbound transaction that userspace never answered: FCP
 * requests just drop their request reference, all others are answered
 * with RCODE_CONFLICT_ERROR so the initiator is not left hanging.
 */
static void release_request(struct client *client,
			    struct client_resource *resource)
{
	struct inbound_transaction_resource *r = container_of(resource,
			struct inbound_transaction_resource, resource);

	if (r->is_fcp)
		fw_request_put(r->request);
	else
		fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);

	fw_card_put(r->card);
	kfree(r);
}
704 
/*
 * fw_address_handler callback for ranges allocated via ioctl_allocate().
 * Runs in atomic context: wraps the inbound request in a client resource,
 * builds the request event matching the client's ABI version, and queues
 * it.  On any failure the request is answered (or dropped, for FCP) and
 * all references are released.
 */
static void handle_request(struct fw_card *card, struct fw_request *request,
			   int tcode, int destination, int source,
			   int generation, unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
{
	struct address_handler_resource *handler = callback_data;
	bool is_fcp = is_in_fcp_region(offset, length);
	struct inbound_transaction_resource *r;
	struct inbound_transaction_event *e;
	size_t event_size0;
	int ret;

	/* card may be different from handler->client->device->card */
	fw_card_get(card);

	// Extend the lifetime of data for request so that its payload is safely accessible in
	// the process context for the client.
	if (is_fcp)
		fw_request_get(request);

	r = kmalloc(sizeof(*r), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (r == NULL || e == NULL)
		goto failed;

	r->card    = card;
	r->request = request;
	r->is_fcp  = is_fcp;
	r->data    = payload;
	r->length  = length;

	r->resource.release = release_request;
	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
	if (ret < 0)
		goto failed;

	if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
		struct fw_cdev_event_request *req = &e->req.request;

		/* The v1 event cannot distinguish lock sub-codes. */
		if (tcode & 0x10)
			tcode = TCODE_LOCK_REQUEST;

		req->type	= FW_CDEV_EVENT_REQUEST;
		req->tcode	= tcode;
		req->offset	= offset;
		req->length	= length;
		req->handle	= r->resource.handle;
		req->closure	= handler->closure;
		event_size0	= sizeof(*req);
	} else if (handler->client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
		struct fw_cdev_event_request2 *req = &e->req.request2;

		req->type	= FW_CDEV_EVENT_REQUEST2;
		req->tcode	= tcode;
		req->offset	= offset;
		req->source_node_id = source;
		req->destination_node_id = destination;
		req->card	= card->index;
		req->generation	= generation;
		req->length	= length;
		req->handle	= r->resource.handle;
		req->closure	= handler->closure;
		event_size0	= sizeof(*req);
	} else {
		struct fw_cdev_event_request3 *req = &e->req.with_tstamp;

		req->type	= FW_CDEV_EVENT_REQUEST3;
		req->tcode	= tcode;
		req->offset	= offset;
		req->source_node_id = source;
		req->destination_node_id = destination;
		req->card	= card->index;
		req->generation	= generation;
		req->length	= length;
		req->handle	= r->resource.handle;
		req->closure	= handler->closure;
		req->tstamp	= fw_request_get_timestamp(request);
		event_size0	= sizeof(*req);
	}

	queue_event(handler->client, &e->event,
		    &e->req, event_size0, r->data, length);
	return;

 failed:
	kfree(r);
	kfree(e);

	/* Tell the initiator we cannot serve the request (except FCP, which
	 * takes no response from us). */
	if (!is_fcp)
		fw_send_response(card, request, RCODE_CONFLICT_ERROR);
	else
		fw_request_put(request);

	fw_card_put(card);
}
800 
/* Unregister and free an address range allocated via ioctl_allocate(). */
static void release_address_handler(struct client *client,
				    struct client_resource *resource)
{
	struct address_handler_resource *r =
	    container_of(resource, struct address_handler_resource, resource);

	fw_core_remove_address_handler(&r->handler);
	kfree(r);
}
810 
/*
 * FW_CDEV_IOC_ALLOCATE: register an address range whose inbound requests
 * are forwarded to this client via handle_request().  Before ABI v4 the
 * region end was implied by offset + length.  On success, a->offset and
 * a->handle are written back to userspace.
 */
static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_allocate *a = &arg->allocate;
	struct address_handler_resource *r;
	struct fw_address_region region;
	int ret;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	region.start = a->offset;
	if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
		region.end = a->offset + a->length;
	else
		region.end = a->region_end;

	r->handler.length           = a->length;
	r->handler.address_callback = handle_request;
	r->handler.callback_data    = r;
	r->closure   = a->closure;
	r->client    = client;

	ret = fw_core_add_address_handler(&r->handler, &region);
	if (ret < 0) {
		kfree(r);
		return ret;
	}
	/* Report the actually assigned offset within the region. */
	a->offset = r->handler.offset;

	r->resource.release = release_address_handler;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &r->resource);
		return ret;
	}
	a->handle = r->resource.handle;

	return 0;
}
851 
/* FW_CDEV_IOC_DEALLOCATE: tear down an address range by its handle. */
static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->deallocate.handle,
				       release_address_handler, NULL);
}
857 
/*
 * FW_CDEV_IOC_SEND_RESPONSE: answer an inbound request previously delivered
 * to userspace.  Claims ownership of the resource from the idr, validates
 * the response length, copies in the payload and sends the response.  FCP
 * requests need no response and are simply released.
 */
static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_response *a = &arg->send_response;
	struct client_resource *resource;
	struct inbound_transaction_resource *r;
	int ret = 0;

	if (release_client_resource(client, a->handle,
				    release_request, &resource) < 0)
		return -EINVAL;

	r = container_of(resource, struct inbound_transaction_resource,
			 resource);
	if (r->is_fcp) {
		fw_request_put(r->request);
		goto out;
	}

	/* Response length is dictated by the request's tcode. */
	if (a->length != fw_get_response_length(r->request)) {
		ret = -EINVAL;
		fw_request_put(r->request);
		goto out;
	}
	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
		ret = -EFAULT;
		fw_request_put(r->request);
		goto out;
	}
	fw_send_response(r->card, r->request, a->rcode);
 out:
	fw_card_put(r->card);
	kfree(r);

	return ret;
}
893 
/* FW_CDEV_IOC_INITIATE_BUS_RESET: schedule a (short or long) bus reset. */
static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
	fw_schedule_bus_reset(client->device->card, true,
			arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
	return 0;
}
900 
/* Remove a config ROM descriptor added via ioctl_add_descriptor(). */
static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor_resource *r =
		container_of(resource, struct descriptor_resource, resource);

	fw_core_remove_descriptor(&r->descriptor);
	kfree(r);
}
910 
/*
 * FW_CDEV_IOC_ADD_DESCRIPTOR: append up to 256 quadlets of descriptor data
 * to the local node's config ROM.  Only permitted on local-node device
 * files.  On success, a->handle is written back to userspace.
 */
static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
	struct descriptor_resource *r;
	int ret;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	if (a->length > 256)
		return -EINVAL;

	/* length is in quadlets (4-byte units). */
	r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
		ret = -EFAULT;
		goto failed;
	}

	r->descriptor.length    = a->length;
	r->descriptor.immediate = a->immediate;
	r->descriptor.key       = a->key;
	r->descriptor.data      = r->data;

	ret = fw_core_add_descriptor(&r->descriptor);
	if (ret < 0)
		goto failed;

	r->resource.release = release_descriptor;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&r->descriptor);
		goto failed;
	}
	a->handle = r->resource.handle;

	return 0;
 failed:
	kfree(r);

	return ret;
}
956 
/* FW_CDEV_IOC_REMOVE_DESCRIPTOR: drop a descriptor by its handle. */
static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->remove_descriptor.handle,
				       release_descriptor, NULL);
}
962 
/*
 * Single-channel iso context callback (atomic context): forward the cycle
 * number and raw headers to userspace as an ISO_INTERRUPT event.  On
 * allocation failure the interrupt is silently dropped.
 */
static void iso_callback(struct fw_iso_context *context, u32 cycle,
			 size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt_event *e;

	e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
	if (e == NULL)
		return;

	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
	e->interrupt.closure   = client->iso_closure;
	e->interrupt.cycle     = cycle;
	e->interrupt.header_length = header_length;
	memcpy(e->interrupt.header, header, header_length);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt) + header_length, NULL, 0);
}
981 
/*
 * Multichannel iso context callback (atomic context): report how far the
 * controller has progressed, translated from a DMA address to an offset
 * within the client's mmap()ed buffer.
 */
static void iso_mc_callback(struct fw_iso_context *context,
			    dma_addr_t completed, void *data)
{
	struct client *client = data;
	struct iso_interrupt_mc_event *e;

	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (e == NULL)
		return;

	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
	e->interrupt.closure   = client->iso_closure;
	e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
						      completed);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt), NULL, 0);
}
999 
1000 static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
1001 {
1002 		if (context->type == FW_ISO_CONTEXT_TRANSMIT)
1003 			return DMA_TO_DEVICE;
1004 		else
1005 			return DMA_FROM_DEVICE;
1006 }
1007 
/*
 * Create a multichannel receive context.  channel/speed/header_size do not
 * apply to this context type, so zeros are passed and the multichannel
 * callback is patched in afterwards.  Returns the context or an ERR_PTR.
 */
static struct fw_iso_context *fw_iso_mc_context_create(struct fw_card *card,
						fw_iso_mc_callback_t callback,
						void *callback_data)
{
	struct fw_iso_context *ctx;

	ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL,
				    0, 0, 0, NULL, callback_data);
	if (!IS_ERR(ctx))
		ctx->callback.mc = callback;

	return ctx;
}
1021 
/*
 * FW_CDEV_IOC_CREATE_ISO_CONTEXT: validate the parameters for the requested
 * context type, create the context, and (first time only) DMA-map the
 * client's iso buffer in the matching direction.  Only one context per
 * client is supported; its handle is always 0.
 */
static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
	struct fw_iso_context *context;
	union fw_iso_callback cb;
	int ret;

	/* The cdev ABI constants must stay numerically identical to the
	 * in-kernel context type constants. */
	BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE  != FW_ISO_CONTEXT_RECEIVE  ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
					FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);

	switch (a->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		if (a->speed > SCODE_3200 || a->channel > 63)
			return -EINVAL;

		cb.sc = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		/* Header size must be a positive multiple of 4 bytes. */
		if (a->header_size < 4 || (a->header_size & 3) ||
		    a->channel > 63)
			return -EINVAL;

		cb.sc = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		cb.mc = iso_mc_callback;
		break;

	default:
		return -EINVAL;
	}

	if (a->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
		context = fw_iso_mc_context_create(client->device->card, cb.mc,
						   client);
	else
		context = fw_iso_context_create(client->device->card, a->type,
						a->channel, a->speed,
						a->header_size, cb.sc, client);
	if (IS_ERR(context))
		return PTR_ERR(context);
	/* Old clients did not flush headers themselves; drop on overflow. */
	if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
		context->drop_overflow_headers = true;

	/* We only support one context at this time. */
	spin_lock_irq(&client->lock);
	if (client->iso_context != NULL) {
		spin_unlock_irq(&client->lock);
		fw_iso_context_destroy(context);

		return -EBUSY;
	}
	if (!client->buffer_is_mapped) {
		ret = fw_iso_buffer_map_dma(&client->buffer,
					    client->device->card,
					    iso_dma_direction(context));
		if (ret < 0) {
			spin_unlock_irq(&client->lock);
			fw_iso_context_destroy(context);

			return ret;
		}
		client->buffer_is_mapped = true;
	}
	client->iso_closure = a->closure;
	client->iso_context = context;
	spin_unlock_irq(&client->lock);

	a->handle = 0;

	return 0;
}
1098 
1099 static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
1100 {
1101 	struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
1102 	struct fw_iso_context *ctx = client->iso_context;
1103 
1104 	if (ctx == NULL || a->handle != 0)
1105 		return -EINVAL;
1106 
1107 	return fw_iso_context_set_channels(ctx, &a->channels);
1108 }
1109 
/*
 * Macros for decoding the iso packet control header
 * (struct fw_cdev_iso_packet.control).  Bit layout, from the masks below:
 * bits 0..15 payload length, bit 16 interrupt flag, bit 17 skip flag,
 * bits 18..19 tag, bits 20..23 sy, bits 24..31 header length in bytes.
 */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
1117 
/*
 * Copies a batch of struct fw_cdev_iso_packet descriptors from userspace
 * and queues them onto the client's single iso context.  Returns the number
 * of packets queued (possibly fewer than submitted, if the context's queue
 * fills up), and rewrites a->size, a->packets and a->data so that userspace
 * can resubmit the remainder.
 */
static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_queue_iso *a = &arg->queue_iso;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, transmit_header_bytes = 0;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		/* In-line transmit headers; GET_HEADER_LENGTH() is at most 255. */
		u8 header[256];
	} u;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we setup the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the a->data pointer is ignored.
	 */
	payload = (unsigned long)a->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (a->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	/* Multichannel reception requires quadlet-aligned payload offsets. */
	if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
		return -EINVAL;

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);

	end = (void __user *)p + a->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		/* Per-context-type validation of the header/payload lengths. */
		switch (ctx->type) {
		case FW_ISO_CONTEXT_TRANSMIT:
			if (u.packet.header_length & 3)
				return -EINVAL;
			transmit_header_bytes = u.packet.header_length;
			break;

		case FW_ISO_CONTEXT_RECEIVE:
			/* Header must be a non-zero multiple of the context's
			 * per-packet header size. */
			if (u.packet.header_length == 0 ||
			    u.packet.header_length % ctx->header_size != 0)
				return -EINVAL;
			break;

		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
			if (u.packet.payload_length == 0 ||
			    u.packet.payload_length & 3)
				return -EINVAL;
			break;
		}

		/* The next descriptor follows the in-line transmit header,
		 * if any; header[] is a u32 array, hence the /4. */
		next = (struct fw_cdev_iso_packet __user *)
			&p->header[transmit_header_bytes / 4];
		if (next > end)
			return -EINVAL;
		if (copy_from_user
		    (u.packet.header, p->header, transmit_header_bytes))
			return -EFAULT;
		/* A skip packet must carry no header or payload data. */
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		/* Stop (without error) once the context's queue is full. */
		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}
	fw_iso_context_queue_flush(ctx);

	/* Report back how far we got, so userspace can resubmit the rest. */
	a->size    -= uptr_to_u64(p) - a->packets;
	a->packets  = uptr_to_u64(p);
	a->data     = client->vm_start + payload;

	return count;
}
1217 
1218 static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
1219 {
1220 	struct fw_cdev_start_iso *a = &arg->start_iso;
1221 
1222 	BUILD_BUG_ON(
1223 	    FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
1224 	    FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
1225 	    FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
1226 	    FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
1227 	    FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);
1228 
1229 	if (client->iso_context == NULL || a->handle != 0)
1230 		return -EINVAL;
1231 
1232 	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
1233 	    (a->tags == 0 || a->tags > 15 || a->sync > 15))
1234 		return -EINVAL;
1235 
1236 	return fw_iso_context_start(client->iso_context,
1237 				    a->cycle, a->sync, a->tags);
1238 }
1239 
1240 static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
1241 {
1242 	struct fw_cdev_stop_iso *a = &arg->stop_iso;
1243 
1244 	if (client->iso_context == NULL || a->handle != 0)
1245 		return -EINVAL;
1246 
1247 	return fw_iso_context_stop(client->iso_context);
1248 }
1249 
1250 static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg)
1251 {
1252 	struct fw_cdev_flush_iso *a = &arg->flush_iso;
1253 
1254 	if (client->iso_context == NULL || a->handle != 0)
1255 		return -EINVAL;
1256 
1257 	return fw_iso_context_flush_completions(client->iso_context);
1258 }
1259 
1260 static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
1261 {
1262 	struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
1263 	struct fw_card *card = client->device->card;
1264 	struct timespec64 ts = {0, 0};
1265 	u32 cycle_time = 0;
1266 	int ret = 0;
1267 
1268 	local_irq_disable();
1269 
1270 	ret = fw_card_read_cycle_time(card, &cycle_time);
1271 	if (ret < 0)
1272 		goto end;
1273 
1274 	switch (a->clk_id) {
1275 	case CLOCK_REALTIME:      ktime_get_real_ts64(&ts);	break;
1276 	case CLOCK_MONOTONIC:     ktime_get_ts64(&ts);		break;
1277 	case CLOCK_MONOTONIC_RAW: ktime_get_raw_ts64(&ts);	break;
1278 	default:
1279 		ret = -EINVAL;
1280 	}
1281 end:
1282 	local_irq_enable();
1283 
1284 	a->tv_sec      = ts.tv_sec;
1285 	a->tv_nsec     = ts.tv_nsec;
1286 	a->cycle_timer = cycle_time;
1287 
1288 	return ret;
1289 }
1290 
/*
 * Legacy FW_CDEV_IOC_GET_CYCLE_TIMER: delegates to ioctl_get_cycle_timer2()
 * with CLOCK_REALTIME and folds the timespec into a single microseconds
 * value.
 *
 * NOTE(review): the helper's return value is ignored.  If
 * fw_card_read_cycle_time() failed, userspace receives local_time derived
 * from a zeroed timespec and cycle_timer == 0 together with a success
 * return — presumably kept because the legacy ABI never reported errors
 * here; confirm before changing.
 */
static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
	struct fw_cdev_get_cycle_timer2 ct2;

	ct2.clk_id = CLOCK_REALTIME;
	ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);

	a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
	a->cycle_timer = ct2.cycle_timer;

	return 0;
}
1304 
/*
 * Delayed-work handler which performs (re)allocation or deallocation of an
 * isochronous resource (channel and/or bandwidth) according to r->todo,
 * then queues the corresponding ALLOCATED/DEALLOCATED event to the client.
 */
static void iso_resource_work(struct work_struct *work)
{
	struct iso_resource_event *e;
	struct iso_resource *r =
			container_of(work, struct iso_resource, work.work);
	struct client *client = r->client;
	int generation, channel, bandwidth, todo;
	bool skip, free, success;

	spin_lock_irq(&client->lock);
	generation = client->device->generation;
	todo = r->todo;
	/* Allow 1000ms grace period for other reallocations. */
	if (todo == ISO_RES_ALLOC &&
	    time_before64(get_jiffies_64(),
			  client->device->card->reset_jiffies + HZ)) {
		schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
		skip = true;
	} else {
		/* We could be called twice within the same generation. */
		skip = todo == ISO_RES_REALLOC &&
		       r->generation == generation;
	}
	/* One-shot requests and deallocations free the resource afterwards. */
	free = todo == ISO_RES_DEALLOC ||
	       todo == ISO_RES_ALLOC_ONCE ||
	       todo == ISO_RES_DEALLOC_ONCE;
	r->generation = generation;
	spin_unlock_irq(&client->lock);

	if (skip)
		goto out;

	bandwidth = r->bandwidth;

	/* The last argument selects allocation (true) vs. deallocation. */
	fw_iso_resource_manage(client->device->card, generation,
			r->channels, &channel, &bandwidth,
			todo == ISO_RES_ALLOC ||
			todo == ISO_RES_REALLOC ||
			todo == ISO_RES_ALLOC_ONCE);
	/*
	 * Is this generation outdated already?  As long as this resource sticks
	 * in the idr, it will be scheduled again for a newer generation or at
	 * shutdown.
	 */
	if (channel == -EAGAIN &&
	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
		goto out;

	success = channel >= 0 || bandwidth > 0;

	spin_lock_irq(&client->lock);
	/*
	 * Transit from allocation to reallocation, except if the client
	 * requested deallocation in the meantime.
	 */
	if (r->todo == ISO_RES_ALLOC)
		r->todo = ISO_RES_REALLOC;
	/*
	 * Allocation or reallocation failure?  Pull this resource out of the
	 * idr and prepare for deletion, unless the client is shutting down.
	 */
	if (r->todo == ISO_RES_REALLOC && !success &&
	    !client->in_shutdown &&
	    idr_remove(&client->resource_idr, r->resource.handle)) {
		client_put(client);
		free = true;
	}
	spin_unlock_irq(&client->lock);

	if (todo == ISO_RES_ALLOC && channel >= 0)
		r->channels = 1ULL << channel;

	/* A successful reallocation is silent; no event is queued. */
	if (todo == ISO_RES_REALLOC && success)
		goto out;

	/* Pick the pre-allocated event matching the operation and consume it. */
	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
		e = r->e_alloc;
		r->e_alloc = NULL;
	} else {
		e = r->e_dealloc;
		r->e_dealloc = NULL;
	}
	e->iso_resource.handle    = r->resource.handle;
	e->iso_resource.channel   = channel;
	e->iso_resource.bandwidth = bandwidth;

	queue_event(client, &e->event,
		    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);

	if (free) {
		cancel_delayed_work(&r->work);
		kfree(r->e_alloc);
		kfree(r->e_dealloc);
		kfree(r);
	}
 out:
	client_put(client);
}
1403 
1404 static void release_iso_resource(struct client *client,
1405 				 struct client_resource *resource)
1406 {
1407 	struct iso_resource *r =
1408 		container_of(resource, struct iso_resource, resource);
1409 
1410 	spin_lock_irq(&client->lock);
1411 	r->todo = ISO_RES_DEALLOC;
1412 	schedule_iso_resource(r, 0);
1413 	spin_unlock_irq(&client->lock);
1414 }
1415 
/*
 * Common setup for the (de)allocate_iso_resource(_once) ioctls: validate
 * the request, allocate the resource bookkeeping and both possible event
 * structures up front (so iso_resource_work() never has to allocate), then
 * either register the resource with the client (ISO_RES_ALLOC) or schedule
 * the one-shot work item immediately.
 *
 * Returns 0 on success, -EINVAL for a bad channels/bandwidth combination,
 * -ENOMEM on allocation failure, or the add_client_resource() error.
 */
static int init_iso_resource(struct client *client,
		struct fw_cdev_allocate_iso_resource *request, int todo)
{
	struct iso_resource_event *e1, *e2;
	struct iso_resource *r;
	int ret;

	if ((request->channels == 0 && request->bandwidth == 0) ||
	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
		return -EINVAL;

	r  = kmalloc(sizeof(*r), GFP_KERNEL);
	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
	if (r == NULL || e1 == NULL || e2 == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_DELAYED_WORK(&r->work, iso_resource_work);
	r->client	= client;
	r->todo		= todo;
	r->generation	= -1;
	r->channels	= request->channels;
	r->bandwidth	= request->bandwidth;
	r->e_alloc	= e1;
	r->e_dealloc	= e2;

	e1->iso_resource.closure = request->closure;
	e1->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
	e2->iso_resource.closure = request->closure;
	e2->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

	if (todo == ISO_RES_ALLOC) {
		r->resource.release = release_iso_resource;
		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
		if (ret < 0)
			goto fail;
	} else {
		/* One-shot requests are not client resources; run right away. */
		r->resource.release = NULL;
		r->resource.handle = -1;
		schedule_iso_resource(r, 0);
	}
	request->handle = r->resource.handle;

	return 0;
 fail:
	kfree(r);
	kfree(e1);
	kfree(e2);

	return ret;
}
1469 
1470 static int ioctl_allocate_iso_resource(struct client *client,
1471 				       union ioctl_arg *arg)
1472 {
1473 	return init_iso_resource(client,
1474 			&arg->allocate_iso_resource, ISO_RES_ALLOC);
1475 }
1476 
1477 static int ioctl_deallocate_iso_resource(struct client *client,
1478 					 union ioctl_arg *arg)
1479 {
1480 	return release_client_resource(client,
1481 			arg->deallocate.handle, release_iso_resource, NULL);
1482 }
1483 
1484 static int ioctl_allocate_iso_resource_once(struct client *client,
1485 					    union ioctl_arg *arg)
1486 {
1487 	return init_iso_resource(client,
1488 			&arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
1489 }
1490 
1491 static int ioctl_deallocate_iso_resource_once(struct client *client,
1492 					      union ioctl_arg *arg)
1493 {
1494 	return init_iso_resource(client,
1495 			&arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
1496 }
1497 
1498 /*
1499  * Returns a speed code:  Maximum speed to or from this device,
1500  * limited by the device's link speed, the local node's link speed,
1501  * and all PHY port speeds between the two links.
1502  */
1503 static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
1504 {
1505 	return client->device->max_speed;
1506 }
1507 
1508 static int ioctl_send_broadcast_request(struct client *client,
1509 					union ioctl_arg *arg)
1510 {
1511 	struct fw_cdev_send_request *a = &arg->send_request;
1512 
1513 	switch (a->tcode) {
1514 	case TCODE_WRITE_QUADLET_REQUEST:
1515 	case TCODE_WRITE_BLOCK_REQUEST:
1516 		break;
1517 	default:
1518 		return -EINVAL;
1519 	}
1520 
1521 	/* Security policy: Only allow accesses to Units Space. */
1522 	if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
1523 		return -EACCES;
1524 
1525 	return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
1526 }
1527 
/*
 * Send an asynchronous stream packet (TCODE_STREAM_DATA).  The destination
 * node ID encodes tag, channel and sy.  Returns -EIO if the speed exceeds
 * the link speed or the length exceeds the per-speed payload maximum
 * (1024 << speed), -EINVAL for out-of-range tag/channel/sy, otherwise the
 * init_request() result.
 */
static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
	struct fw_cdev_send_request request;
	int dest;

	if (a->speed > client->device->card->link_speed ||
	    a->length > 1024 << a->speed)
		return -EIO;

	if (a->tag > 3 || a->channel > 63 || a->sy > 15)
		return -EINVAL;

	dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
	/* NOTE(review): only these members of request are initialized;
	 * the remaining fields (e.g. offset) stay uninitialized —
	 * presumably unused by init_request() for TCODE_STREAM_DATA; confirm. */
	request.tcode		= TCODE_STREAM_DATA;
	request.length		= a->length;
	request.closure		= a->closure;
	request.data		= a->data;
	request.generation	= a->generation;

	return init_request(client, &request, dest, a->speed);
}
1550 
/*
 * Completion callback for PHY packets submitted by ioctl_send_phy_packet().
 * Maps the hardware ack/status code to an rcode, records the timestamp,
 * and queues a PHY_PACKET_SENT(2) event to the originating client.
 */
static void outbound_phy_packet_callback(struct fw_packet *packet,
					 struct fw_card *card, int status)
{
	struct outbound_phy_packet_event *e =
		container_of(packet, struct outbound_phy_packet_event, p);
	/* Saved up front: queue_event() hands e over to the event list, so
	 * presumably e must not be touched for the final client_put(). */
	struct client *e_client = e->client;
	u32 rcode;

	trace_async_phy_outbound_complete((uintptr_t)packet, card->index, status, packet->generation,
					  packet->timestamp);

	switch (status) {
	// expected:
	case ACK_COMPLETE:
		rcode = RCODE_COMPLETE;
		break;
	// should never happen with PHY packets:
	case ACK_PENDING:
		rcode = RCODE_COMPLETE;
		break;
	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:
		rcode = RCODE_BUSY;
		break;
	case ACK_DATA_ERROR:
		rcode = RCODE_DATA_ERROR;
		break;
	case ACK_TYPE_ERROR:
		rcode = RCODE_TYPE_ERROR;
		break;
	// stale generation; cancelled; on certain controllers: no ack
	default:
		rcode = status;
		break;
	}

	/* The event variant was chosen at submit time from client->version. */
	switch (e->phy_packet.without_tstamp.type) {
	case FW_CDEV_EVENT_PHY_PACKET_SENT:
	{
		struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp;

		pp->rcode = rcode;
		/* Legacy event has no tstamp field; report it via data[0]. */
		pp->data[0] = packet->timestamp;
		queue_event(e->client, &e->event, &e->phy_packet, sizeof(*pp) + pp->length,
			    NULL, 0);
		break;
	}
	case FW_CDEV_EVENT_PHY_PACKET_SENT2:
	{
		struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp;

		pp->rcode = rcode;
		pp->tstamp = packet->timestamp;
		queue_event(e->client, &e->event, &e->phy_packet, sizeof(*pp) + pp->length,
			    NULL, 0);
		break;
	}
	default:
		WARN_ON(1);
		break;
	}

	/* Drop the reference taken in ioctl_send_phy_packet(). */
	client_put(e_client);
}
1616 
/*
 * Transmit a raw PHY packet (TCODE_LINK_INTERNAL) carrying the two user
 * supplied quadlets.  Completion is reported asynchronously by
 * outbound_phy_packet_callback() as a PHY_PACKET_SENT(2) event; the event
 * structure is allocated here, with room for echoing the data back.
 */
static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
	struct fw_card *card = client->device->card;
	struct outbound_phy_packet_event *e;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	/* sizeof(a->data) extra bytes back the event's trailing data array. */
	e = kzalloc(sizeof(*e) + sizeof(a->data), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	/* Reference is dropped in outbound_phy_packet_callback(). */
	client_get(client);
	e->client		= client;
	e->p.speed		= SCODE_100;
	e->p.generation		= a->generation;
	async_header_set_tcode(e->p.header, TCODE_LINK_INTERNAL);
	e->p.header[1]		= a->data[0];
	e->p.header[2]		= a->data[1];
	e->p.header_length	= 12;
	e->p.callback		= outbound_phy_packet_callback;

	if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
		struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp;

		pp->closure = a->closure;
		pp->type = FW_CDEV_EVENT_PHY_PACKET_SENT;
		/* For pings the callback reports the timestamp in data[0]. */
		if (is_ping_packet(a->data))
			pp->length = 4;
	} else {
		struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp;

		pp->closure = a->closure;
		pp->type = FW_CDEV_EVENT_PHY_PACKET_SENT2;
		// Keep the data field so that application can match the response event to the
		// request.
		pp->length = sizeof(a->data);
		memcpy(pp->data, a->data, sizeof(a->data));
	}

	trace_async_phy_outbound_initiate((uintptr_t)&e->p, card->index, e->p.generation,
					  e->p.header[1], e->p.header[2]);

	card->driver->send_request(card, &e->p);

	return 0;
}
1666 
1667 static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
1668 {
1669 	struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
1670 	struct fw_card *card = client->device->card;
1671 
1672 	/* Access policy: Allow this ioctl only on local nodes' device files. */
1673 	if (!client->device->is_local)
1674 		return -ENOSYS;
1675 
1676 	spin_lock_irq(&card->lock);
1677 
1678 	list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
1679 	client->phy_receiver_closure = a->closure;
1680 
1681 	spin_unlock_irq(&card->lock);
1682 
1683 	return 0;
1684 }
1685 
1686 void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
1687 {
1688 	struct client *client;
1689 	struct inbound_phy_packet_event *e;
1690 	unsigned long flags;
1691 
1692 	spin_lock_irqsave(&card->lock, flags);
1693 
1694 	list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
1695 		e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
1696 		if (e == NULL)
1697 			break;
1698 
1699 		if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
1700 			struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp;
1701 
1702 			pp->closure = client->phy_receiver_closure;
1703 			pp->type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
1704 			pp->rcode = RCODE_COMPLETE;
1705 			pp->length = 8;
1706 			pp->data[0] = p->header[1];
1707 			pp->data[1] = p->header[2];
1708 			queue_event(client, &e->event, &e->phy_packet, sizeof(*pp) + 8, NULL, 0);
1709 		} else {
1710 			struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp;
1711 
1712 			pp = &e->phy_packet.with_tstamp;
1713 			pp->closure = client->phy_receiver_closure;
1714 			pp->type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED2;
1715 			pp->rcode = RCODE_COMPLETE;
1716 			pp->length = 8;
1717 			pp->tstamp = p->timestamp;
1718 			pp->data[0] = p->header[1];
1719 			pp->data[1] = p->header[2];
1720 			queue_event(client, &e->event, &e->phy_packet, sizeof(*pp) + 8, NULL, 0);
1721 		}
1722 	}
1723 
1724 	spin_unlock_irqrestore(&card->lock, flags);
1725 }
1726 
/*
 * Handler table indexed by _IOC_NR(cmd) — see dispatch_ioctl().  Each
 * index must match the _IOC_NR of the corresponding FW_CDEV_IOC_* command
 * number defined in linux/firewire-cdev.h.
 */
static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
	[0x00] = ioctl_get_info,
	[0x01] = ioctl_send_request,
	[0x02] = ioctl_allocate,
	[0x03] = ioctl_deallocate,
	[0x04] = ioctl_send_response,
	[0x05] = ioctl_initiate_bus_reset,
	[0x06] = ioctl_add_descriptor,
	[0x07] = ioctl_remove_descriptor,
	[0x08] = ioctl_create_iso_context,
	[0x09] = ioctl_queue_iso,
	[0x0a] = ioctl_start_iso,
	[0x0b] = ioctl_stop_iso,
	[0x0c] = ioctl_get_cycle_timer,
	[0x0d] = ioctl_allocate_iso_resource,
	[0x0e] = ioctl_deallocate_iso_resource,
	[0x0f] = ioctl_allocate_iso_resource_once,
	[0x10] = ioctl_deallocate_iso_resource_once,
	[0x11] = ioctl_get_speed,
	[0x12] = ioctl_send_broadcast_request,
	[0x13] = ioctl_send_stream_packet,
	[0x14] = ioctl_get_cycle_timer2,
	[0x15] = ioctl_send_phy_packet,
	[0x16] = ioctl_receive_phy_packets,
	[0x17] = ioctl_set_iso_channels,
	[0x18] = ioctl_flush_iso,
};
1754 
1755 static int dispatch_ioctl(struct client *client,
1756 			  unsigned int cmd, void __user *arg)
1757 {
1758 	union ioctl_arg buffer;
1759 	int ret;
1760 
1761 	if (fw_device_is_shutdown(client->device))
1762 		return -ENODEV;
1763 
1764 	if (_IOC_TYPE(cmd) != '#' ||
1765 	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
1766 	    _IOC_SIZE(cmd) > sizeof(buffer))
1767 		return -ENOTTY;
1768 
1769 	memset(&buffer, 0, sizeof(buffer));
1770 
1771 	if (_IOC_DIR(cmd) & _IOC_WRITE)
1772 		if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
1773 			return -EFAULT;
1774 
1775 	ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
1776 	if (ret < 0)
1777 		return ret;
1778 
1779 	if (_IOC_DIR(cmd) & _IOC_READ)
1780 		if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
1781 			return -EFAULT;
1782 
1783 	return ret;
1784 }
1785 
/* .unlocked_ioctl entry point; file->private_data is the struct client. */
static long fw_device_op_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}
1791 
/*
 * Map the client's single isochronous buffer into userspace.  The page
 * array is allocated on first mmap; if an iso context already exists, the
 * buffer is additionally DMA-mapped in the context's direction (otherwise
 * ioctl_create_iso_context() maps it later).
 */
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	/* NOTE(review): vm_start is recorded before the size check below,
	 * so it remains set even when this mmap fails with -EINVAL. */
	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	ret = fw_iso_buffer_alloc(&client->buffer, page_count);
	if (ret < 0)
		return ret;

	spin_lock_irq(&client->lock);
	if (client->iso_context) {
		ret = fw_iso_buffer_map_dma(&client->buffer,
				client->device->card,
				iso_dma_direction(client->iso_context));
		client->buffer_is_mapped = (ret == 0);
	}
	spin_unlock_irq(&client->lock);
	if (ret < 0)
		goto fail;

	ret = vm_map_pages_zero(vma, client->buffer.pages,
				client->buffer.page_count);
	if (ret < 0)
		goto fail;

	return 0;
 fail:
	fw_iso_buffer_destroy(&client->buffer, client->device->card);
	return ret;
}
1842 
1843 static int is_outbound_transaction_resource(int id, void *p, void *data)
1844 {
1845 	struct client_resource *resource = p;
1846 
1847 	return resource->release == release_transaction;
1848 }
1849 
1850 static int has_outbound_transactions(struct client *client)
1851 {
1852 	int ret;
1853 
1854 	spin_lock_irq(&client->lock);
1855 	ret = idr_for_each(&client->resource_idr,
1856 			   is_outbound_transaction_resource, NULL);
1857 	spin_unlock_irq(&client->lock);
1858 
1859 	return ret;
1860 }
1861 
/*
 * idr_for_each() callback used at client shutdown: invoke the resource's
 * release hook (e.g. release_transaction() or release_iso_resource()) and
 * drop the client reference that the registered resource held.
 */
static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;
	struct client *client = data;

	resource->release(client, resource);
	client_put(client);

	return 0;
}
1872 
/*
 * Tear down a client on final close.  Order matters: unlink from PHY
 * reception and the device's client list, destroy the iso context and
 * buffer, freeze the resource idr and event list, wait for outbound
 * transactions to drain, then release the remaining resources and events.
 */
static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *event, *next_event;

	spin_lock_irq(&client->device->card->lock);
	list_del(&client->phy_receiver_link);
	spin_unlock_irq(&client->device->card->lock);

	scoped_guard(mutex, &client->device->client_list_mutex)
		list_del(&client->link);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irq(&client->lock);
	client->in_shutdown = true;
	spin_unlock_irq(&client->lock);

	wait_event(client->tx_flush_wait, !has_outbound_transactions(client));

	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_destroy(&client->resource_idr);

	/* Queued but never-read events are simply discarded. */
	list_for_each_entry_safe(event, next_event, &client->event_list, link)
		kfree(event);

	/* Drop the reference held on behalf of the file descriptor. */
	client_put(client);

	return 0;
}
1908 
1909 static __poll_t fw_device_op_poll(struct file *file, poll_table * pt)
1910 {
1911 	struct client *client = file->private_data;
1912 	__poll_t mask = 0;
1913 
1914 	poll_wait(file, &client->wait, pt);
1915 
1916 	if (fw_device_is_shutdown(client->device))
1917 		mask |= EPOLLHUP | EPOLLERR;
1918 	if (!list_empty(&client->event_list))
1919 		mask |= EPOLLIN | EPOLLRDNORM;
1920 
1921 	return mask;
1922 }
1923 
/* File operations of the firewire character device. */
const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.mmap		= fw_device_op_mmap,
	.release	= fw_device_op_release,
	.poll		= fw_device_op_poll,
	.compat_ioctl	= compat_ptr_ioctl,
};
1935