/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "core.h"

/*
 * ABI version history is documented in linux/firewire-cdev.h.
 */
#define FW_CDEV_KERNEL_VERSION			5
#define FW_CDEV_VERSION_EVENT_REQUEST2		4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END	4
#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW	5

struct client {
	u32 version;
	struct fw_device *device;

	spinlock_t lock;
	bool in_shutdown;
	struct idr resource_idr;
	struct list_head event_list;
	wait_queue_head_t wait;
	wait_queue_head_t tx_flush_wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;

	struct list_head phy_receiver_link;
	u64 phy_receiver_closure;

	struct list_head link;
	struct kref kref;
};

static inline void client_get(struct client *client)
{
	kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
	struct client *client = container_of(kref, struct client, kref);

	fw_device_put(client->device);
	kfree(client);
}

static void client_put(struct client *client)
{
	kref_put(&client->kref, client_release);
}

struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);
struct client_resource {
	client_resource_release_fn_t release;
	int handle;
};

struct address_handler_resource {
	struct client_resource resource;
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
};

struct outbound_transaction_resource {
	struct client_resource resource;
	struct fw_transaction transaction;
};

struct inbound_transaction_resource {
	struct client_resource resource;
	struct fw_card *card;
	struct fw_request *request;
	void *data;
	size_t length;
};

struct descriptor_resource {
	struct client_resource resource;
	struct fw_descriptor descriptor;
	u32 data[0];
};

struct iso_resource {
	struct client_resource resource;
	struct client *client;
	/* Schedule work and access todo only with client->lock held. */
	struct delayed_work work;
	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
	int generation;
	u64 channels;
	s32 bandwidth;
	struct iso_resource_event *e_alloc, *e_dealloc;
};

static void release_iso_resource(struct client *, struct client_resource *);

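/*
 * Queue the resource's work item and hand it a client reference.  If the
 * work was already queued, queue_delayed_work() returns false and the
 * extra reference is dropped again right away.
 */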
static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
	client_get(r->client);
	if (!queue_delayed_work(fw_workqueue, &r->work, delay))
		client_put(r->client);
}

static void schedule_if_iso_resource(struct client_resource *resource)
{
	if (resource->release == release_iso_resource)
		schedule_iso_resource(container_of(resource,
					struct iso_resource, resource), 0);
}

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct bus_reset_event {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
	struct event event;
	struct client *client;
	struct outbound_transaction_resource r;
	struct fw_cdev_event_response response;
};

struct inbound_transaction_event {
	struct event event;
	union {
		struct fw_cdev_event_request request;
		struct fw_cdev_event_request2 request2;
	} req;
};

struct iso_interrupt_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_interrupt_mc_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt_mc interrupt;
};

struct iso_resource_event {
	struct event event;
	struct fw_cdev_event_iso_resource iso_resource;
};

struct outbound_phy_packet_event {
	struct event event;
	struct client *client;
	struct fw_packet p;
	struct fw_cdev_event_phy_packet phy_packet;
};

struct inbound_phy_packet_event {
	struct event event;
	struct fw_cdev_event_phy_packet phy_packet;
};

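/*
 * User pointers travel through the ABI in 64-bit fields.  For a 32-bit
 * process on a 64-bit kernel they must be converted with compat_ptr()/
 * ptr_to_compat() rather than a plain cast.
 */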
#ifdef CONFIG_COMPAT
static void __user *u64_to_uptr(u64 value)
{
	if (is_compat_task())
		return compat_ptr(value);
	else
		return (void __user *)(unsigned long)value;
}

static u64 uptr_to_u64(void __user *ptr)
{
	if (is_compat_task())
		return ptr_to_compat(ptr);
	else
		return (u64)(unsigned long)ptr;
}
#else
static inline void __user *u64_to_uptr(u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline u64 uptr_to_u64(void __user *ptr)
{
	return (u64)(unsigned long)ptr;
}
#endif /* CONFIG_COMPAT */

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = fw_device_get_by_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);
		return -ENOMEM;
	}

	client->device = device;
	spin_lock_init(&client->lock);
	idr_init(&client->resource_idr);
	INIT_LIST_HEAD(&client->event_list);
	init_waitqueue_head(&client->wait);
	init_waitqueue_head(&client->tx_flush_wait);
	INIT_LIST_HEAD(&client->phy_receiver_link);
	INIT_LIST_HEAD(&client->link);
	kref_init(&client->kref);

	file->private_data = client;

	return nonseekable_open(inode, file);
}

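/*
 * An event carries up to two data segments; dequeue_event() copies them
 * back to back into the reader's buffer.  This lets a fixed-size header
 * struct and a variable-length payload be queued without concatenating
 * them into an extra allocation first.
 */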
static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		kfree(event);
	else
		list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}

static int dequeue_event(struct client *client,
			 char __user *buffer, size_t count)
{
	struct event *event;
	size_t size, total;
	int i, ret;

	ret = wait_event_interruptible(client->wait,
			!list_empty(&client->event_list) ||
			fw_device_is_shutdown(client->device));
	if (ret < 0)
		return ret;

	if (list_empty(&client->event_list) &&
	    fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irq(&client->lock);
	event = list_first_entry(&client->event_list, struct event, link);
	list_del(&event->link);
	spin_unlock_irq(&client->lock);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			ret = -EFAULT;
			goto out;
		}
		total += size;
	}
	ret = total;

 out:
	kfree(event);

	return ret;
}

static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
				 struct client *client)
{
	struct fw_card *card = client->device->card;

	spin_lock_irq(&card->lock);

	event->closure = client->bus_reset_closure;
	event->type = FW_CDEV_EVENT_BUS_RESET;
	event->generation = client->device->generation;
	event->node_id = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id = card->bm_node_id;
	event->irm_node_id = card->irm_node->node_id;
	event->root_node_id = card->root_node->node_id;

	spin_unlock_irq(&card->lock);
}

static void for_each_client(struct fw_device *device,
			    void (*callback)(struct client *client))
{
	struct client *c;

	mutex_lock(&device->client_list_mutex);
	list_for_each_entry(c, &device->client_list, link)
		callback(c);
	mutex_unlock(&device->client_list_mutex);
}

static int schedule_reallocations(int id, void *p, void *data)
{
	schedule_if_iso_resource(p);

	return 0;
}

static void queue_bus_reset_event(struct client *client)
{
	struct bus_reset_event *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL) {
		fw_notice(client->device->card, "out of memory when allocating event\n");
		return;
	}

	fill_bus_reset_event(&e->reset, client);

	queue_event(client, &e->event,
		    &e->reset, sizeof(e->reset), NULL, 0);

	spin_lock_irq(&client->lock);
	idr_for_each(&client->resource_idr, schedule_reallocations, client);
	spin_unlock_irq(&client->lock);
}

void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}

union ioctl_arg {
	struct fw_cdev_get_info get_info;
	struct fw_cdev_send_request send_request;
	struct fw_cdev_allocate allocate;
	struct fw_cdev_deallocate deallocate;
	struct fw_cdev_send_response send_response;
	struct fw_cdev_initiate_bus_reset initiate_bus_reset;
	struct fw_cdev_add_descriptor add_descriptor;
	struct fw_cdev_remove_descriptor remove_descriptor;
	struct fw_cdev_create_iso_context create_iso_context;
	struct fw_cdev_queue_iso queue_iso;
	struct fw_cdev_start_iso start_iso;
	struct fw_cdev_stop_iso stop_iso;
	struct fw_cdev_get_cycle_timer get_cycle_timer;
	struct fw_cdev_allocate_iso_resource allocate_iso_resource;
	struct fw_cdev_send_stream_packet send_stream_packet;
	struct fw_cdev_get_cycle_timer2 get_cycle_timer2;
	struct fw_cdev_send_phy_packet send_phy_packet;
	struct fw_cdev_receive_phy_packets receive_phy_packets;
	struct fw_cdev_set_iso_channels set_iso_channels;
	struct fw_cdev_flush_iso flush_iso;
};

static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_info *a = &arg->get_info;
	struct fw_cdev_event_bus_reset bus_reset;
	unsigned long ret = 0;

	client->version = a->version;
	a->version = FW_CDEV_KERNEL_VERSION;
	a->card = client->device->card->index;

	down_read(&fw_device_rwsem);

	if (a->rom != 0) {
		size_t want = a->rom_length;
		size_t have = client->device->config_rom_length * 4;

		ret = copy_to_user(u64_to_uptr(a->rom),
				   client->device->config_rom, min(want, have));
	}
	a->rom_length = client->device->config_rom_length * 4;

	up_read(&fw_device_rwsem);

	if (ret != 0)
		return -EFAULT;

	mutex_lock(&client->device->client_list_mutex);

	client->bus_reset_closure = a->bus_reset_closure;
	if (a->bus_reset != 0) {
		fill_bus_reset_event(&bus_reset, client);
		/* unaligned size of bus_reset is 36 bytes */
		ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
	}
	if (ret == 0 && list_empty(&client->link))
		list_add_tail(&client->link, &client->device->client_list);

	mutex_unlock(&client->device->client_list_mutex);

	return ret ? -EFAULT : 0;
}

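/*
 * Allocate an idr handle for the resource, using the old two-step IDR
 * API:  idr_pre_get() preloads memory, and idr_get_new() may still fail
 * with -EAGAIN under concurrent allocation, in which case the whole
 * sequence is retried.  The idr holds a client reference per resource.
 */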
static int add_client_resource(struct client *client,
			       struct client_resource *resource, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

 retry:
	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
		return -ENOMEM;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		ret = -ECANCELED;
	else
		ret = idr_get_new(&client->resource_idr, resource,
				  &resource->handle);
	if (ret >= 0) {
		client_get(client);
		schedule_if_iso_resource(resource);
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (ret == -EAGAIN)
		goto retry;

	return ret < 0 ? ret : 0;
}

static int release_client_resource(struct client *client, u32 handle,
				   client_resource_release_fn_t release,
				   struct client_resource **return_resource)
{
	struct client_resource *resource;

	spin_lock_irq(&client->lock);
	if (client->in_shutdown)
		resource = NULL;
	else
		resource = idr_find(&client->resource_idr, handle);
	if (resource && resource->release == release)
		idr_remove(&client->resource_idr, handle);
	spin_unlock_irq(&client->lock);

	if (!(resource && resource->release == release))
		return -EINVAL;

	if (return_resource)
		*return_resource = resource;
	else
		resource->release(client, resource);

	client_put(client);

	return 0;
}

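/*
 * Outbound transactions are completed in complete_transaction() only;
 * this callback exists just so that such resources can be told apart by
 * their release pointer, hence it is intentionally empty.
 */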
static void release_transaction(struct client *client,
				struct client_resource *resource)
{
}

static void complete_transaction(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct outbound_transaction_event *e = data;
	struct fw_cdev_event_response *rsp = &e->response;
	struct client *client = e->client;
	unsigned long flags;

	if (length < rsp->length)
		rsp->length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(rsp->data, payload, rsp->length);

	spin_lock_irqsave(&client->lock, flags);
	idr_remove(&client->resource_idr, e->r.resource.handle);
	if (client->in_shutdown)
		wake_up(&client->tx_flush_wait);
	spin_unlock_irqrestore(&client->lock, flags);

	rsp->type = FW_CDEV_EVENT_RESPONSE;
	rsp->rcode = rcode;

	/*
	 * In the case that sizeof(*rsp) doesn't align with the position of the
	 * data, and the read is short, preserve an extra copy of the data
	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
	 * for short reads and some apps depended on it, this is both safe
	 * and prudent for compatibility.
	 */
	if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
		queue_event(client, &e->event, rsp, sizeof(*rsp),
			    rsp->data, rsp->length);
	else
		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
			    NULL, 0);

	/* Drop the idr's reference */
	client_put(client);
}

static int init_request(struct client *client,
			struct fw_cdev_send_request *request,
			int destination_id, int speed)
{
	struct outbound_transaction_event *e;
	int ret;

	if (request->tcode != TCODE_STREAM_DATA &&
	    (request->length > 4096 || request->length > 512 << speed))
		return -EIO;

	if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
	    request->length < 4)
		return -EINVAL;

	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	e->client = client;
	e->response.length = request->length;
	e->response.closure = request->closure;

	if (request->data &&
	    copy_from_user(e->response.data,
			   u64_to_uptr(request->data), request->length)) {
		ret = -EFAULT;
		goto failed;
	}

	e->r.resource.release = release_transaction;
	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
	if (ret < 0)
		goto failed;

	fw_send_request(client->device->card, &e->r.transaction,
			request->tcode, destination_id, request->generation,
			speed, request->offset, e->response.data,
			request->length, complete_transaction, e);
	return 0;

 failed:
	kfree(e);

	return ret;
}

static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
	switch (arg->send_request.tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_MASK_SWAP:
	case TCODE_LOCK_COMPARE_SWAP:
	case TCODE_LOCK_FETCH_ADD:
	case TCODE_LOCK_LITTLE_ADD:
	case TCODE_LOCK_BOUNDED_ADD:
	case TCODE_LOCK_WRAP_ADD:
	case TCODE_LOCK_VENDOR_DEPENDENT:
		break;
	default:
		return -EINVAL;
	}

	return init_request(client, &arg->send_request, client->device->node_id,
			    client->device->max_speed);
}

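/*
 * Inbound writes to the FCP register regions are passed to the address
 * handler with a NULL struct fw_request, since the core responds to them
 * on its own; a NULL request therefore identifies an FCP frame.
 */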
static inline bool is_fcp_request(struct fw_request *request)
{
	return request == NULL;
}

static void release_request(struct client *client,
			    struct client_resource *resource)
{
	struct inbound_transaction_resource *r = container_of(resource,
			struct inbound_transaction_resource, resource);

	if (is_fcp_request(r->request))
		kfree(r->data);
	else
		fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);

	fw_card_put(r->card);
	kfree(r);
}

static void handle_request(struct fw_card *card, struct fw_request *request,
			   int tcode, int destination, int source,
			   int generation, unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
{
	struct address_handler_resource *handler = callback_data;
	struct inbound_transaction_resource *r;
	struct inbound_transaction_event *e;
	size_t event_size0;
	void *fcp_frame = NULL;
	int ret;

	/* card may be different from handler->client->device->card */
	fw_card_get(card);

	r = kmalloc(sizeof(*r), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (r == NULL || e == NULL) {
		fw_notice(card, "out of memory when allocating event\n");
		goto failed;
	}
	r->card = card;
	r->request = request;
	r->data = payload;
	r->length = length;

	if (is_fcp_request(request)) {
		/*
		 * FIXME: Let core-transaction.c manage a
		 * single reference-counted copy?
		 */
		fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
		if (fcp_frame == NULL)
			goto failed;

		r->data = fcp_frame;
	}

	r->resource.release = release_request;
	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
	if (ret < 0)
		goto failed;

	if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
		struct fw_cdev_event_request *req = &e->req.request;

		if (tcode & 0x10)
			tcode = TCODE_LOCK_REQUEST;

		req->type = FW_CDEV_EVENT_REQUEST;
		req->tcode = tcode;
		req->offset = offset;
		req->length = length;
		req->handle = r->resource.handle;
		req->closure = handler->closure;
		event_size0 = sizeof(*req);
	} else {
		struct fw_cdev_event_request2 *req = &e->req.request2;

		req->type = FW_CDEV_EVENT_REQUEST2;
		req->tcode = tcode;
		req->offset = offset;
		req->source_node_id = source;
		req->destination_node_id = destination;
		req->card = card->index;
		req->generation = generation;
		req->length = length;
		req->handle = r->resource.handle;
		req->closure = handler->closure;
		event_size0 = sizeof(*req);
	}

	queue_event(handler->client, &e->event,
		    &e->req, event_size0, r->data, length);
	return;

 failed:
	kfree(r);
	kfree(e);
	kfree(fcp_frame);

	if (!is_fcp_request(request))
		fw_send_response(card, request, RCODE_CONFLICT_ERROR);

	fw_card_put(card);
}

static void release_address_handler(struct client *client,
				    struct client_resource *resource)
{
	struct address_handler_resource *r =
		container_of(resource, struct address_handler_resource, resource);

	fw_core_remove_address_handler(&r->handler);
	kfree(r);
}

static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_allocate *a = &arg->allocate;
	struct address_handler_resource *r;
	struct fw_address_region region;
	int ret;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	region.start = a->offset;
	if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
		region.end = a->offset + a->length;
	else
		region.end = a->region_end;

	r->handler.length = a->length;
	r->handler.address_callback = handle_request;
	r->handler.callback_data = r;
	r->closure = a->closure;
	r->client = client;

	ret = fw_core_add_address_handler(&r->handler, &region);
	if (ret < 0) {
		kfree(r);
		return ret;
	}
	a->offset = r->handler.offset;

	r->resource.release = release_address_handler;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &r->resource);
		return ret;
	}
	a->handle = r->resource.handle;

	return 0;
}

static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->deallocate.handle,
				       release_address_handler, NULL);
}

static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_response *a = &arg->send_response;
	struct client_resource *resource;
	struct inbound_transaction_resource *r;
	int ret = 0;

	if (release_client_resource(client, a->handle,
				    release_request, &resource) < 0)
		return -EINVAL;

	r = container_of(resource, struct inbound_transaction_resource,
			 resource);
	if (is_fcp_request(r->request))
		goto out;

	if (a->length != fw_get_response_length(r->request)) {
		ret = -EINVAL;
		kfree(r->request);
		goto out;
	}
	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
		ret = -EFAULT;
		kfree(r->request);
		goto out;
	}
	fw_send_response(r->card, r->request, a->rcode);
 out:
	fw_card_put(r->card);
	kfree(r);

	return ret;
}

static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
	fw_schedule_bus_reset(client->device->card, true,
			arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
	return 0;
}

static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor_resource *r =
		container_of(resource, struct descriptor_resource, resource);

	fw_core_remove_descriptor(&r->descriptor);
	kfree(r);
}

static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
	struct descriptor_resource *r;
	int ret;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	if (a->length > 256)
		return -EINVAL;

	r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
		ret = -EFAULT;
		goto failed;
	}

	r->descriptor.length = a->length;
	r->descriptor.immediate = a->immediate;
	r->descriptor.key = a->key;
	r->descriptor.data = r->data;

	ret = fw_core_add_descriptor(&r->descriptor);
	if (ret < 0)
		goto failed;

	r->resource.release = release_descriptor;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&r->descriptor);
		goto failed;
	}
	a->handle = r->resource.handle;

	return 0;
 failed:
	kfree(r);

	return ret;
}

static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->remove_descriptor.handle,
				       release_descriptor, NULL);
}

static void iso_callback(struct fw_iso_context *context, u32 cycle,
			 size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt_event *e;

	e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
	if (e == NULL) {
		fw_notice(context->card, "out of memory when allocating event\n");
		return;
	}
	e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
	e->interrupt.closure = client->iso_closure;
	e->interrupt.cycle = cycle;
	e->interrupt.header_length = header_length;
	memcpy(e->interrupt.header, header, header_length);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt) + header_length, NULL, 0);
}

static void iso_mc_callback(struct fw_iso_context *context,
			    dma_addr_t completed, void *data)
{
	struct client *client = data;
	struct iso_interrupt_mc_event *e;

	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (e == NULL) {
		fw_notice(context->card, "out of memory when allocating event\n");
		return;
	}
	e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
	e->interrupt.closure = client->iso_closure;
	e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
						      completed);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt), NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
	struct fw_iso_context *context;
	fw_iso_callback_t cb;

	BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
					FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);

	switch (a->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		if (a->speed > SCODE_3200 || a->channel > 63)
			return -EINVAL;

		cb = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		if (a->header_size < 4 || (a->header_size & 3) ||
		    a->channel > 63)
			return -EINVAL;

		cb = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		cb = (fw_iso_callback_t)iso_mc_callback;
		break;

	default:
		return -EINVAL;
	}

	context = fw_iso_context_create(client->device->card, a->type,
			a->channel, a->speed, a->header_size, cb, client);
	if (IS_ERR(context))
		return PTR_ERR(context);
	if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
		context->drop_overflow_headers = true;

	/* We only support one context at this time. */
	spin_lock_irq(&client->lock);
	if (client->iso_context != NULL) {
		spin_unlock_irq(&client->lock);
		fw_iso_context_destroy(context);
		return -EBUSY;
	}
	client->iso_closure = a->closure;
	client->iso_context = context;
	spin_unlock_irq(&client->lock);

	a->handle = 0;

	return 0;
}

static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
	struct fw_iso_context *ctx = client->iso_context;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_set_channels(ctx, &a->channels);
}

/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
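/*
 * Layout of the control word, as encoded by the macros above:
 *   bits  0..15  payload_length
 *   bit  16      interrupt
 *   bit  17      skip
 *   bits 18..19  tag
 *   bits 20..23  sy
 *   bits 24..31  header_length
 */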

static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_queue_iso *a = &arg->queue_iso;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, transmit_header_bytes = 0;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we set up the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the a->data pointer is ignored.
	 */
	payload = (unsigned long)a->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (a->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
		return -EINVAL;

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
	if (!access_ok(VERIFY_READ, p, a->size))
		return -EFAULT;

	end = (void __user *)p + a->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		switch (ctx->type) {
		case FW_ISO_CONTEXT_TRANSMIT:
			if (u.packet.header_length & 3)
				return -EINVAL;
			transmit_header_bytes = u.packet.header_length;
			break;

		case FW_ISO_CONTEXT_RECEIVE:
			if (u.packet.header_length == 0 ||
			    u.packet.header_length % ctx->header_size != 0)
				return -EINVAL;
			break;

		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
			if (u.packet.payload_length == 0 ||
			    u.packet.payload_length & 3)
				return -EINVAL;
			break;
		}

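		/*
		 * The variable-length transmit header is stored inline
		 * behind the fixed part of struct fw_cdev_iso_packet, so
		 * the next packet descriptor starts right after it.
		 */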
		next = (struct fw_cdev_iso_packet __user *)
			&p->header[transmit_header_bytes / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
		    (u.packet.header, p->header, transmit_header_bytes))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}
	fw_iso_context_queue_flush(ctx);

	a->size -= uptr_to_u64(p) - a->packets;
	a->packets = uptr_to_u64(p);
	a->data = client->vm_start + payload;

	return count;
}

static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_start_iso *a = &arg->start_iso;

	BUILD_BUG_ON(
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
	    (a->tags == 0 || a->tags > 15 || a->sync > 15))
		return -EINVAL;

	return fw_iso_context_start(client->iso_context,
				    a->cycle, a->sync, a->tags);
}

static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_stop_iso *a = &arg->stop_iso;

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}

static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_flush_iso *a = &arg->flush_iso;

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_flush_completions(client->iso_context);
}

static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
	struct fw_card *card = client->device->card;
	struct timespec ts = {0, 0};
	u32 cycle_time;
	int ret = 0;

	local_irq_disable();

	cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);

	switch (a->clk_id) {
	case CLOCK_REALTIME:      getnstimeofday(&ts);                   break;
	case CLOCK_MONOTONIC:     do_posix_clock_monotonic_gettime(&ts); break;
	case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts);                  break;
	default:
		ret = -EINVAL;
	}

	local_irq_enable();

	a->tv_sec = ts.tv_sec;
	a->tv_nsec = ts.tv_nsec;
	a->cycle_timer = cycle_time;

	return ret;
}

static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
	struct fw_cdev_get_cycle_timer2 ct2;

	ct2.clk_id = CLOCK_REALTIME;
	ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);

	a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
	a->cycle_timer = ct2.cycle_timer;

	return 0;
}

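/*
 * Worker for iso resource (re)allocation.  The todo field encodes a small
 * state machine:  ALLOC transitions to REALLOC once channel/bandwidth has
 * been claimed, REALLOC re-claims it after each bus reset, and the
 * DEALLOC/ONCE states release the resource and free it.
 */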
static void iso_resource_work(struct work_struct *work)
{
	struct iso_resource_event *e;
	struct iso_resource *r =
			container_of(work, struct iso_resource, work.work);
	struct client *client = r->client;
	int generation, channel, bandwidth, todo;
	bool skip, free, success;

	spin_lock_irq(&client->lock);
	generation = client->device->generation;
	todo = r->todo;
	/* Allow 1000ms grace period for other reallocations. */
	if (todo == ISO_RES_ALLOC &&
	    time_before64(get_jiffies_64(),
			  client->device->card->reset_jiffies + HZ)) {
		schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
		skip = true;
	} else {
		/* We could be called twice within the same generation. */
		skip = todo == ISO_RES_REALLOC &&
		       r->generation == generation;
	}
	free = todo == ISO_RES_DEALLOC ||
	       todo == ISO_RES_ALLOC_ONCE ||
	       todo == ISO_RES_DEALLOC_ONCE;
	r->generation = generation;
	spin_unlock_irq(&client->lock);

	if (skip)
		goto out;

	bandwidth = r->bandwidth;

	fw_iso_resource_manage(client->device->card, generation,
			r->channels, &channel, &bandwidth,
			todo == ISO_RES_ALLOC ||
			todo == ISO_RES_REALLOC ||
			todo == ISO_RES_ALLOC_ONCE);
	/*
	 * Is this generation outdated already?  As long as this resource
	 * sticks in the idr, it will be scheduled again for a newer
	 * generation or at shutdown.
	 */
	if (channel == -EAGAIN &&
	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
		goto out;

	success = channel >= 0 || bandwidth > 0;

	spin_lock_irq(&client->lock);
	/*
	 * Transition from allocation to reallocation, except if the client
	 * requested deallocation in the meantime.
	 */
	if (r->todo == ISO_RES_ALLOC)
		r->todo = ISO_RES_REALLOC;
	/*
	 * Allocation or reallocation failure?  Pull this resource out of the
	 * idr and prepare for deletion, unless the client is shutting down.
	 */
	if (r->todo == ISO_RES_REALLOC && !success &&
	    !client->in_shutdown &&
	    idr_find(&client->resource_idr, r->resource.handle)) {
		idr_remove(&client->resource_idr, r->resource.handle);
		client_put(client);
		free = true;
	}
	spin_unlock_irq(&client->lock);

	if (todo == ISO_RES_ALLOC && channel >= 0)
		r->channels = 1ULL << channel;

	if (todo == ISO_RES_REALLOC && success)
		goto out;

	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
		e = r->e_alloc;
		r->e_alloc = NULL;
	} else {
		e = r->e_dealloc;
		r->e_dealloc = NULL;
	}
	e->iso_resource.handle = r->resource.handle;
	e->iso_resource.channel = channel;
	e->iso_resource.bandwidth = bandwidth;

	queue_event(client, &e->event,
		    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);

	if (free) {
		cancel_delayed_work(&r->work);
		kfree(r->e_alloc);
		kfree(r->e_dealloc);
		kfree(r);
	}
 out:
	client_put(client);
}

static void release_iso_resource(struct client *client,
				 struct client_resource *resource)
{
	struct iso_resource *r =
		container_of(resource, struct iso_resource, resource);

	spin_lock_irq(&client->lock);
	r->todo = ISO_RES_DEALLOC;
	schedule_iso_resource(r, 0);
	spin_unlock_irq(&client->lock);
}

static int init_iso_resource(struct client *client,
		struct fw_cdev_allocate_iso_resource *request, int todo)
{
	struct iso_resource_event *e1, *e2;
	struct iso_resource *r;
	int ret;

	if ((request->channels == 0 && request->bandwidth == 0) ||
	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
	    request->bandwidth < 0)
		return -EINVAL;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
	if (r == NULL || e1 == NULL || e2 == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_DELAYED_WORK(&r->work, iso_resource_work);
	r->client = client;
	r->todo = todo;
	r->generation = -1;
	r->channels = request->channels;
	r->bandwidth = request->bandwidth;
	r->e_alloc = e1;
	r->e_dealloc = e2;

	e1->iso_resource.closure = request->closure;
	e1->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
	e2->iso_resource.closure = request->closure;
	e2->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

	if (todo == ISO_RES_ALLOC) {
		r->resource.release = release_iso_resource;
		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
		if (ret < 0)
			goto fail;
	} else {
		r->resource.release = NULL;
		r->resource.handle = -1;
		schedule_iso_resource(r, 0);
	}
	request->handle = r->resource.handle;

	return 0;
 fail:
	kfree(r);
	kfree(e1);
	kfree(e2);

	return ret;
}

static int ioctl_allocate_iso_resource(struct client *client,
				       union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC);
}

static int ioctl_deallocate_iso_resource(struct client *client,
					 union ioctl_arg *arg)
{
	return release_client_resource(client,
			arg->deallocate.handle, release_iso_resource, NULL);
}

static int ioctl_allocate_iso_resource_once(struct client *client,
					    union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}

static int ioctl_deallocate_iso_resource_once(struct client *client,
					      union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}

/*
 * Returns a speed code:  Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
	return client->device->max_speed;
}

static int ioctl_send_broadcast_request(struct client *client,
					union ioctl_arg *arg)
{
	struct fw_cdev_send_request *a = &arg->send_request;

	switch (a->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		break;
	default:
		return -EINVAL;
	}

	/* Security policy: Only allow accesses to Units Space. */
	if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
		return -EACCES;

	return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}

static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
	struct fw_cdev_send_request request;
	int dest;

	if (a->speed > client->device->card->link_speed ||
	    a->length > 1024 << a->speed)
		return -EIO;

	if (a->tag > 3 || a->channel > 63 || a->sy > 15)
		return -EINVAL;

	dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
	request.tcode = TCODE_STREAM_DATA;
	request.length = a->length;
	request.closure = a->closure;
	request.data = a->data;
	request.generation = a->generation;

	return init_request(client, &request, dest, a->speed);
}

static void outbound_phy_packet_callback(struct fw_packet *packet,
					 struct fw_card *card, int status)
{
	struct outbound_phy_packet_event *e =
		container_of(packet, struct outbound_phy_packet_event, p);

	switch (status) {
	/* expected: */
	case ACK_COMPLETE:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
	/* should never happen with PHY packets: */
	case ACK_PENDING:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:	e->phy_packet.rcode = RCODE_BUSY;	break;
	case ACK_DATA_ERROR:	e->phy_packet.rcode = RCODE_DATA_ERROR;	break;
	case ACK_TYPE_ERROR:	e->phy_packet.rcode = RCODE_TYPE_ERROR;	break;
	/* stale generation; cancelled; on certain controllers: no ack */
	default:		e->phy_packet.rcode = status;		break;
	}
	e->phy_packet.data[0] = packet->timestamp;

	queue_event(e->client, &e->event, &e->phy_packet,
		    sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
	client_put(e->client);
}

static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
	struct fw_card *card = client->device->card;
	struct outbound_phy_packet_event *e;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	client_get(client);
	e->client = client;
	e->p.speed = SCODE_100;
	e->p.generation = a->generation;
	e->p.header[0] = TCODE_LINK_INTERNAL << 4;
	e->p.header[1] = a->data[0];
	e->p.header[2] = a->data[1];
	e->p.header_length = 12;
	e->p.callback = outbound_phy_packet_callback;
	e->phy_packet.closure = a->closure;
	e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_SENT;
	if (is_ping_packet(a->data))
		e->phy_packet.length = 4;

	card->driver->send_request(card, &e->p);

	return 0;
}

static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
	struct fw_card *card = client->device->card;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	spin_lock_irq(&card->lock);

	list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
	client->phy_receiver_closure = a->closure;

	spin_unlock_irq(&card->lock);

	return 0;
}

void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
	struct client *client;
	struct inbound_phy_packet_event *e;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
		e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
		if (e == NULL) {
			fw_notice(card, "out of memory when allocating event\n");
			break;
		}
		e->phy_packet.closure = client->phy_receiver_closure;
		e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
		e->phy_packet.rcode = RCODE_COMPLETE;
		e->phy_packet.length = 8;
		e->phy_packet.data[0] = p->header[1];
		e->phy_packet.data[1] = p->header[2];
		queue_event(client, &e->event,
			    &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
	}

	spin_unlock_irqrestore(&card->lock, flags);
}

static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
	[0x00] = ioctl_get_info,
	[0x01] = ioctl_send_request,
	[0x02] = ioctl_allocate,
	[0x03] = ioctl_deallocate,
	[0x04] = ioctl_send_response,
	[0x05] = ioctl_initiate_bus_reset,
	[0x06] = ioctl_add_descriptor,
	[0x07] = ioctl_remove_descriptor,
	[0x08] = ioctl_create_iso_context,
	[0x09] = ioctl_queue_iso,
	[0x0a] = ioctl_start_iso,
	[0x0b] = ioctl_stop_iso,
	[0x0c] = ioctl_get_cycle_timer,
	[0x0d] = ioctl_allocate_iso_resource,
	[0x0e] = ioctl_deallocate_iso_resource,
	[0x0f] = ioctl_allocate_iso_resource_once,
	[0x10] = ioctl_deallocate_iso_resource_once,
	[0x11] = ioctl_get_speed,
	[0x12] = ioctl_send_broadcast_request,
	[0x13] = ioctl_send_stream_packet,
	[0x14] = ioctl_get_cycle_timer2,
	[0x15] = ioctl_send_phy_packet,
	[0x16] = ioctl_receive_phy_packets,
	[0x17] = ioctl_set_iso_channels,
	[0x18] = ioctl_flush_iso,
};

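/*
 * Generic ioctl dispatcher:  The command's _IOC_DIR() and _IOC_SIZE()
 * bits determine how much of the argument is copied in before the
 * handler runs and copied back out afterwards, so the handlers operate
 * on a kernel copy; indirect user buffers are copied by the handlers
 * themselves.
 */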
static int dispatch_ioctl(struct client *client,
			  unsigned int cmd, void __user *arg)
{
	union ioctl_arg buffer;
	int ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
	    _IOC_SIZE(cmd) > sizeof(buffer))
		return -ENOTTY;

	if (_IOC_DIR(cmd) == _IOC_READ)
		memset(&buffer, 0, _IOC_SIZE(cmd));

	if (_IOC_DIR(cmd) & _IOC_WRITE)
		if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ)
		if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
			return -EFAULT;

	return ret;
}

static long fw_device_op_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
				      unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

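	/*
	 * A writable mapping selects a transmit buffer (DMA from memory
	 * to the device), a read-only mapping a receive buffer.
	 */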
	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
				 page_count, direction);
	if (ret < 0)
		return ret;

	ret = fw_iso_buffer_map(&client->buffer, vma);
	if (ret < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return ret;
}

static int is_outbound_transaction_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;

	return resource->release == release_transaction;
}

static int has_outbound_transactions(struct client *client)
{
	int ret;

	spin_lock_irq(&client->lock);
	ret = idr_for_each(&client->resource_idr,
			   is_outbound_transaction_resource, NULL);
	spin_unlock_irq(&client->lock);

	return ret;
}

static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;
	struct client *client = data;

	resource->release(client, resource);
	client_put(client);

	return 0;
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *event, *next_event;

	spin_lock_irq(&client->device->card->lock);
	list_del(&client->phy_receiver_link);
	spin_unlock_irq(&client->device->card->lock);

	mutex_lock(&client->device->client_list_mutex);
	list_del(&client->link);
	mutex_unlock(&client->device->client_list_mutex);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irq(&client->lock);
	client->in_shutdown = true;
	spin_unlock_irq(&client->lock);

	wait_event(client->tx_flush_wait, !has_outbound_transactions(client));

	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_remove_all(&client->resource_idr);
	idr_destroy(&client->resource_idr);

	list_for_each_entry_safe(event, next_event, &client->event_list, link)
		kfree(event);

	client_put(client);

	return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table *pt)
{
	struct client *client = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= POLLHUP | POLLERR;
	if (!list_empty(&client->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

const struct file_operations fw_device_ops = {
	.owner          = THIS_MODULE,
	.llseek         = no_llseek,
	.open           = fw_device_op_open,
	.read           = fw_device_op_read,
	.unlocked_ioctl = fw_device_op_ioctl,
	.mmap           = fw_device_op_mmap,
	.release        = fw_device_op_release,
	.poll           = fw_device_op_poll,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = fw_device_op_compat_ioctl,
#endif
};