// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Diolan DLN-2 USB adapter
 *
 * Copyright (c) 2014 Intel Corporation
 *
 * Derived from:
 *  i2c-diolan-u2c.c
 *  Copyright (c) 2010-2011 Ericsson AB
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/dln2.h>
#include <linux/rculist.h>

struct dln2_header {
	__le16 size;
	__le16 id;
	__le16 echo;
	__le16 handle;
};

struct dln2_response {
	struct dln2_header hdr;
	__le16 result;
};

#define DLN2_GENERIC_MODULE_ID		0x00
#define DLN2_GENERIC_CMD(cmd)		DLN2_CMD(cmd, DLN2_GENERIC_MODULE_ID)
#define CMD_GET_DEVICE_VER		DLN2_GENERIC_CMD(0x30)
#define CMD_GET_DEVICE_SN		DLN2_GENERIC_CMD(0x31)

#define DLN2_HW_ID			0x200
#define DLN2_USB_TIMEOUT		200	/* in ms */
#define DLN2_MAX_RX_SLOTS		16
#define DLN2_MAX_URBS			16
#define DLN2_RX_BUF_SIZE		512

enum dln2_handle {
	DLN2_HANDLE_EVENT = 0,		/* don't change, hardware defined */
	DLN2_HANDLE_CTRL,
	DLN2_HANDLE_GPIO,
	DLN2_HANDLE_I2C,
	DLN2_HANDLE_SPI,
	DLN2_HANDLE_ADC,
	DLN2_HANDLES
};
/*
 * Receive context used between the receive demultiplexer and the transfer
 * routine. When sending a request, the transfer routine looks for a free
 * receive context and uses it to wait for the response and to take over the
 * URB carrying the response data.
 */
struct dln2_rx_context {
	/* completion used to wait for a response */
	struct completion done;

	/* if non-NULL the URB contains the response */
	struct urb *urb;

	/* if true then this context is used to wait for a response */
	bool in_use;
};

/*
 * Receive contexts for a particular DLN2 module (i2c, gpio, etc.). We use the
 * handle header field to identify the module in dln2_dev.mod_rx_slots and then
 * the echo header field to index the slots field and find the receive context
 * for a particular request.
 */
struct dln2_mod_rx_slots {
	/* RX slots bitmap */
	DECLARE_BITMAP(bmap, DLN2_MAX_RX_SLOTS);

	/* used to wait for a free RX slot */
	wait_queue_head_t wq;

	/* used to wait for an RX operation to complete */
	struct dln2_rx_context slots[DLN2_MAX_RX_SLOTS];

	/* avoid races between alloc/free_rx_slot and dln2_transfer_complete */
	spinlock_t lock;
};

enum dln2_endpoint {
	DLN2_EP_OUT = 0,
	DLN2_EP_IN = 1,
};

struct dln2_dev {
	struct usb_device *usb_dev;
	struct usb_interface *interface;
	u8 ep_in;
	u8 ep_out;

	struct urb *rx_urb[DLN2_MAX_URBS];
	void *rx_buf[DLN2_MAX_URBS];

	struct dln2_mod_rx_slots mod_rx_slots[DLN2_HANDLES];

	struct list_head event_cb_list;
	spinlock_t event_cb_lock;

	bool disconnect;
	int active_transfers;
	wait_queue_head_t disconnect_wq;
	spinlock_t disconnect_lock;
};

struct dln2_event_cb_entry {
	struct list_head list;
	u16 id;
	struct platform_device *pdev;
	dln2_event_cb_t callback;
};

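/*
 * Register a callback for unsolicited events with the given event id. Only
 * one callback may be registered per id (-EBUSY otherwise). Callbacks are
 * invoked from the URB completion path, so they must not sleep.
 */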
int dln2_register_event_cb(struct platform_device *pdev, u16 id,
			   dln2_event_cb_t event_cb)
{
	struct dln2_dev *dln2 = dev_get_drvdata(pdev->dev.parent);
	struct dln2_event_cb_entry *i, *entry;
	unsigned long flags;
	int ret = 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id = id;
	entry->callback = event_cb;
	entry->pdev = pdev;

	spin_lock_irqsave(&dln2->event_cb_lock, flags);

	list_for_each_entry(i, &dln2->event_cb_list, list) {
		if (i->id == id) {
			ret = -EBUSY;
			break;
		}
	}

	if (!ret)
		list_add_rcu(&entry->list, &dln2->event_cb_list);

	spin_unlock_irqrestore(&dln2->event_cb_lock, flags);

	if (ret)
		kfree(entry);

	return ret;
}
EXPORT_SYMBOL(dln2_register_event_cb);

void dln2_unregister_event_cb(struct platform_device *pdev, u16 id)
{
	struct dln2_dev *dln2 = dev_get_drvdata(pdev->dev.parent);
	struct dln2_event_cb_entry *i;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&dln2->event_cb_lock, flags);

	list_for_each_entry(i, &dln2->event_cb_list, list) {
		if (i->id == id) {
			list_del_rcu(&i->list);
			found = true;
			break;
		}
	}

	spin_unlock_irqrestore(&dln2->event_cb_lock, flags);

	if (found) {
		synchronize_rcu();
		kfree(i);
	}
}
EXPORT_SYMBOL(dln2_unregister_event_cb);

/*
 * Returns true if a valid transfer slot is found. In this case the URB must
 * not be resubmitted immediately in dln2_rx as we need the data when
 * dln2_transfer is woken up. It will be resubmitted there.
 */
static bool dln2_transfer_complete(struct dln2_dev *dln2, struct urb *urb,
				   u16 handle, u16 rx_slot)
{
	struct device *dev = &dln2->interface->dev;
	struct dln2_mod_rx_slots *rxs = &dln2->mod_rx_slots[handle];
	struct dln2_rx_context *rxc;
	unsigned long flags;
	bool valid_slot = false;

	if (rx_slot >= DLN2_MAX_RX_SLOTS)
		goto out;

	rxc = &rxs->slots[rx_slot];

	spin_lock_irqsave(&rxs->lock, flags);
	if (rxc->in_use && !rxc->urb) {
		rxc->urb = urb;
		complete(&rxc->done);
		valid_slot = true;
	}
	spin_unlock_irqrestore(&rxs->lock, flags);

out:
	if (!valid_slot)
		dev_warn(dev, "bad/late response %d/%d\n", handle, rx_slot);

	return valid_slot;
}

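/*
 * Dispatch an event to the callback registered for its id. The list is
 * walked under rcu_read_lock() so dln2_unregister_event_cb() can use
 * synchronize_rcu() before freeing an entry.
 */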
static void dln2_run_event_callbacks(struct dln2_dev *dln2, u16 id, u16 echo,
				     void *data, int len)
{
	struct dln2_event_cb_entry *i;

	rcu_read_lock();

	list_for_each_entry_rcu(i, &dln2->event_cb_list, list) {
		if (i->id == id) {
			i->callback(i->pdev, echo, data, len);
			break;
		}
	}

	rcu_read_unlock();
}

static void dln2_rx(struct urb *urb)
{
	struct dln2_dev *dln2 = urb->context;
	struct dln2_header *hdr = urb->transfer_buffer;
	struct device *dev = &dln2->interface->dev;
	u16 id, echo, handle, size;
	u8 *data;
	int len;
	int err;

	switch (urb->status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EPIPE:
		/* this urb is terminated, clean up */
		dev_dbg(dev, "urb shutting down with status %d\n", urb->status);
		return;
	default:
		dev_dbg(dev, "nonzero urb status received %d\n", urb->status);
		goto out;
	}

	if (urb->actual_length < sizeof(struct dln2_header)) {
		dev_err(dev, "short response: %d\n", urb->actual_length);
		goto out;
	}

	handle = le16_to_cpu(hdr->handle);
	id = le16_to_cpu(hdr->id);
	echo = le16_to_cpu(hdr->echo);
	size = le16_to_cpu(hdr->size);

	if (size != urb->actual_length) {
		dev_err(dev, "size mismatch: handle %x cmd %x echo %x size %d actual %d\n",
			handle, id, echo, size, urb->actual_length);
		goto out;
	}

	if (handle >= DLN2_HANDLES) {
		dev_warn(dev, "invalid handle %d\n", handle);
		goto out;
	}

	data = urb->transfer_buffer + sizeof(struct dln2_header);
	len = urb->actual_length - sizeof(struct dln2_header);

	if (handle == DLN2_HANDLE_EVENT) {
		unsigned long flags;

		spin_lock_irqsave(&dln2->event_cb_lock, flags);
		dln2_run_event_callbacks(dln2, id, echo, data, len);
		spin_unlock_irqrestore(&dln2->event_cb_lock, flags);
	} else {
		/* URB will be re-submitted in _dln2_transfer (free_rx_slot) */
		if (dln2_transfer_complete(dln2, urb, handle, echo))
			return;
	}

out:
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err < 0)
		dev_err(dev, "failed to resubmit RX URB: %d\n", err);
}

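/*
 * Build a request buffer: allocate room for a dln2_header followed by the
 * payload, fill in the header fields and copy the payload. On return
 * *obuf_len holds the total (header + payload) length.
 */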
static void *dln2_prep_buf(u16 handle, u16 cmd, u16 echo, const void *obuf,
			   int *obuf_len, gfp_t gfp)
{
	int len;
	void *buf;
	struct dln2_header *hdr;

	len = *obuf_len + sizeof(*hdr);
	buf = kmalloc(len, gfp);
	if (!buf)
		return NULL;

	hdr = (struct dln2_header *)buf;
	hdr->id = cpu_to_le16(cmd);
	hdr->size = cpu_to_le16(len);
	hdr->echo = cpu_to_le16(echo);
	hdr->handle = cpu_to_le16(handle);

	memcpy(buf + sizeof(*hdr), obuf, *obuf_len);

	*obuf_len = len;

	return buf;
}

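/*
 * Send a framed request on the bulk OUT endpoint. usb_bulk_msg() is
 * synchronous, so this returns once the USB write has completed or timed
 * out; the response is collected separately through the RX slot.
 */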
static int dln2_send_wait(struct dln2_dev *dln2, u16 handle, u16 cmd, u16 echo,
			  const void *obuf, int obuf_len)
{
	int ret = 0;
	int len = obuf_len;
	void *buf;
	int actual;

	buf = dln2_prep_buf(handle, cmd, echo, obuf, &len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_bulk_msg(dln2->usb_dev,
			   usb_sndbulkpipe(dln2->usb_dev, dln2->ep_out),
			   buf, len, &actual, DLN2_USB_TIMEOUT);

	kfree(buf);

	return ret;
}

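/*
 * Condition helper for alloc_rx_slot(): try to claim a free RX slot for the
 * given handle. Returns true when a slot was claimed (its index in *slot) or
 * when the device is disconnecting, in which case *slot is -ENODEV.
 */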
static bool find_free_slot(struct dln2_dev *dln2, u16 handle, int *slot)
{
	struct dln2_mod_rx_slots *rxs;
	unsigned long flags;

	if (dln2->disconnect) {
		*slot = -ENODEV;
		return true;
	}

	rxs = &dln2->mod_rx_slots[handle];

	spin_lock_irqsave(&rxs->lock, flags);

	*slot = find_first_zero_bit(rxs->bmap, DLN2_MAX_RX_SLOTS);

	if (*slot < DLN2_MAX_RX_SLOTS) {
		struct dln2_rx_context *rxc = &rxs->slots[*slot];

		set_bit(*slot, rxs->bmap);
		rxc->in_use = true;
	}

	spin_unlock_irqrestore(&rxs->lock, flags);

	return *slot < DLN2_MAX_RX_SLOTS;
}

static int alloc_rx_slot(struct dln2_dev *dln2, u16 handle)
{
	int ret;
	int slot;

	/*
	 * No need to timeout here, the wait is bounded by the timeout in
	 * _dln2_transfer.
	 */
	ret = wait_event_interruptible(dln2->mod_rx_slots[handle].wq,
				       find_free_slot(dln2, handle, &slot));
	if (ret < 0)
		return ret;

	return slot;
}

static void free_rx_slot(struct dln2_dev *dln2, u16 handle, int slot)
{
	struct dln2_mod_rx_slots *rxs;
	struct urb *urb = NULL;
	unsigned long flags;
	struct dln2_rx_context *rxc;

	rxs = &dln2->mod_rx_slots[handle];

	spin_lock_irqsave(&rxs->lock, flags);

	clear_bit(slot, rxs->bmap);

	rxc = &rxs->slots[slot];
	rxc->in_use = false;
	urb = rxc->urb;
	rxc->urb = NULL;
	reinit_completion(&rxc->done);

	spin_unlock_irqrestore(&rxs->lock, flags);

	if (urb) {
		int err;
		struct device *dev = &dln2->interface->dev;

		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err < 0)
			dev_err(dev, "failed to resubmit RX URB: %d\n", err);
	}

	wake_up_interruptible(&rxs->wq);
}

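/*
 * Core request/response path: reserve an RX slot, send the request with the
 * slot index in the echo field, wait for dln2_rx()/dln2_transfer_complete()
 * to hand over the response URB, then copy the payload (if any) to ibuf.
 * free_rx_slot() resubmits the URB and releases the slot.
 */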
static int _dln2_transfer(struct dln2_dev *dln2, u16 handle, u16 cmd,
			  const void *obuf, unsigned obuf_len,
			  void *ibuf, unsigned *ibuf_len)
{
	int ret = 0;
	int rx_slot;
	struct dln2_response *rsp;
	struct dln2_rx_context *rxc;
	struct device *dev = &dln2->interface->dev;
	const unsigned long timeout = msecs_to_jiffies(DLN2_USB_TIMEOUT);
	struct dln2_mod_rx_slots *rxs = &dln2->mod_rx_slots[handle];
	int size;

	spin_lock(&dln2->disconnect_lock);
	if (!dln2->disconnect)
		dln2->active_transfers++;
	else
		ret = -ENODEV;
	spin_unlock(&dln2->disconnect_lock);

	if (ret)
		return ret;

	rx_slot = alloc_rx_slot(dln2, handle);
	if (rx_slot < 0) {
		ret = rx_slot;
		goto out_decr;
	}

	ret = dln2_send_wait(dln2, handle, cmd, rx_slot, obuf, obuf_len);
	if (ret < 0) {
		dev_err(dev, "USB write failed: %d\n", ret);
		goto out_free_rx_slot;
	}

	rxc = &rxs->slots[rx_slot];

	ret = wait_for_completion_interruptible_timeout(&rxc->done, timeout);
	if (ret <= 0) {
		if (!ret)
			ret = -ETIMEDOUT;
		goto out_free_rx_slot;
	} else {
		ret = 0;
	}

	if (dln2->disconnect) {
		ret = -ENODEV;
		goto out_free_rx_slot;
	}

	/* if we got here we know that the response header has been checked */
	rsp = rxc->urb->transfer_buffer;
	size = le16_to_cpu(rsp->hdr.size);

	if (size < sizeof(*rsp)) {
		ret = -EPROTO;
		goto out_free_rx_slot;
	}

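	/* result codes above 0x80 are treated as device-reported errors */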
	if (le16_to_cpu(rsp->result) > 0x80) {
		dev_dbg(dev, "%d received response with error %d\n",
			handle, le16_to_cpu(rsp->result));
		ret = -EREMOTEIO;
		goto out_free_rx_slot;
	}

	if (!ibuf)
		goto out_free_rx_slot;

	if (*ibuf_len > size - sizeof(*rsp))
		*ibuf_len = size - sizeof(*rsp);

	memcpy(ibuf, rsp + 1, *ibuf_len);

out_free_rx_slot:
	free_rx_slot(dln2, handle, rx_slot);
out_decr:
	spin_lock(&dln2->disconnect_lock);
	dln2->active_transfers--;
	spin_unlock(&dln2->disconnect_lock);
	if (dln2->disconnect)
		wake_up(&dln2->disconnect_wq);

	return ret;
}

int dln2_transfer(struct platform_device *pdev, u16 cmd,
		  const void *obuf, unsigned obuf_len,
		  void *ibuf, unsigned *ibuf_len)
{
	struct dln2_platform_data *dln2_pdata;
	struct dln2_dev *dln2;
	u16 handle;

	dln2 = dev_get_drvdata(pdev->dev.parent);
	dln2_pdata = dev_get_platdata(&pdev->dev);
	handle = dln2_pdata->handle;

	return _dln2_transfer(dln2, handle, cmd, obuf, obuf_len, ibuf,
			      ibuf_len);
}
EXPORT_SYMBOL(dln2_transfer);

static int dln2_check_hw(struct dln2_dev *dln2)
{
	int ret;
	__le32 hw_type;
	int len = sizeof(hw_type);

	ret = _dln2_transfer(dln2, DLN2_HANDLE_CTRL, CMD_GET_DEVICE_VER,
			     NULL, 0, &hw_type, &len);
	if (ret < 0)
		return ret;
	if (len < sizeof(hw_type))
		return -EREMOTEIO;

	if (le32_to_cpu(hw_type) != DLN2_HW_ID) {
		dev_err(&dln2->interface->dev, "Device ID 0x%x not supported\n",
			le32_to_cpu(hw_type));
		return -ENODEV;
	}

	return 0;
}

static int dln2_print_serialno(struct dln2_dev *dln2)
{
	int ret;
	__le32 serial_no;
	int len = sizeof(serial_no);
	struct device *dev = &dln2->interface->dev;

	ret = _dln2_transfer(dln2, DLN2_HANDLE_CTRL, CMD_GET_DEVICE_SN, NULL, 0,
			     &serial_no, &len);
	if (ret < 0)
		return ret;
	if (len < sizeof(serial_no))
		return -EREMOTEIO;

	dev_info(dev, "Diolan DLN2 serial %u\n", le32_to_cpu(serial_no));

	return 0;
}

static int dln2_hw_init(struct dln2_dev *dln2)
{
	int ret;

	ret = dln2_check_hw(dln2);
	if (ret < 0)
		return ret;

	return dln2_print_serialno(dln2);
}

static void dln2_free_rx_urbs(struct dln2_dev *dln2)
{
	int i;

	for (i = 0; i < DLN2_MAX_URBS; i++) {
		usb_free_urb(dln2->rx_urb[i]);
		kfree(dln2->rx_buf[i]);
	}
}

static void dln2_stop_rx_urbs(struct dln2_dev *dln2)
{
	int i;

	for (i = 0; i < DLN2_MAX_URBS; i++)
		usb_kill_urb(dln2->rx_urb[i]);
}

static void dln2_free(struct dln2_dev *dln2)
{
	dln2_free_rx_urbs(dln2);
	usb_put_dev(dln2->usb_dev);
	kfree(dln2);
}

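/*
 * Allocate the RX buffers and bulk IN URBs. The URBs are submitted later by
 * dln2_start_rx_urbs() and released by dln2_free_rx_urbs(); on error the
 * caller cleans up via dln2_free().
 */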
static int dln2_setup_rx_urbs(struct dln2_dev *dln2,
			      struct usb_host_interface *hostif)
{
	int i;
	const int rx_max_size = DLN2_RX_BUF_SIZE;

	for (i = 0; i < DLN2_MAX_URBS; i++) {
		dln2->rx_buf[i] = kmalloc(rx_max_size, GFP_KERNEL);
		if (!dln2->rx_buf[i])
			return -ENOMEM;

		dln2->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
		if (!dln2->rx_urb[i])
			return -ENOMEM;

		usb_fill_bulk_urb(dln2->rx_urb[i], dln2->usb_dev,
				  usb_rcvbulkpipe(dln2->usb_dev, dln2->ep_in),
				  dln2->rx_buf[i], rx_max_size, dln2_rx, dln2);
	}

	return 0;
}

static int dln2_start_rx_urbs(struct dln2_dev *dln2, gfp_t gfp)
{
	struct device *dev = &dln2->interface->dev;
	int ret;
	int i;

	for (i = 0; i < DLN2_MAX_URBS; i++) {
		ret = usb_submit_urb(dln2->rx_urb[i], gfp);
		if (ret < 0) {
			dev_err(dev, "failed to submit RX URB: %d\n", ret);
			return ret;
		}
	}

	return 0;
}

enum {
	DLN2_ACPI_MATCH_GPIO = 0,
	DLN2_ACPI_MATCH_I2C = 1,
	DLN2_ACPI_MATCH_SPI = 2,
	DLN2_ACPI_MATCH_ADC = 3,
};

static struct dln2_platform_data dln2_pdata_gpio = {
	.handle = DLN2_HANDLE_GPIO,
};

static struct mfd_cell_acpi_match dln2_acpi_match_gpio = {
	.adr = DLN2_ACPI_MATCH_GPIO,
};

/* Only one I2C port seems to be supported on current hardware */
static struct dln2_platform_data dln2_pdata_i2c = {
	.handle = DLN2_HANDLE_I2C,
	.port = 0,
};

static struct mfd_cell_acpi_match dln2_acpi_match_i2c = {
	.adr = DLN2_ACPI_MATCH_I2C,
};

/* Only one SPI port supported */
static struct dln2_platform_data dln2_pdata_spi = {
	.handle = DLN2_HANDLE_SPI,
	.port = 0,
};

static struct mfd_cell_acpi_match dln2_acpi_match_spi = {
	.adr = DLN2_ACPI_MATCH_SPI,
};

/* Only one ADC port supported */
static struct dln2_platform_data dln2_pdata_adc = {
	.handle = DLN2_HANDLE_ADC,
	.port = 0,
};

static struct mfd_cell_acpi_match dln2_acpi_match_adc = {
	.adr = DLN2_ACPI_MATCH_ADC,
};

static const struct mfd_cell dln2_devs[] = {
	{
		.name = "dln2-gpio",
		.acpi_match = &dln2_acpi_match_gpio,
		.platform_data = &dln2_pdata_gpio,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
	{
		.name = "dln2-i2c",
		.acpi_match = &dln2_acpi_match_i2c,
		.platform_data = &dln2_pdata_i2c,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
	{
		.name = "dln2-spi",
		.acpi_match = &dln2_acpi_match_spi,
		.platform_data = &dln2_pdata_spi,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
	{
		.name = "dln2-adc",
		.acpi_match = &dln2_acpi_match_adc,
		.platform_data = &dln2_pdata_adc,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
};

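/*
 * Quiesce the device: block new transfers, complete every waiting RX slot,
 * wait for in-flight transfers to drain and finally kill the RX URBs. Used
 * from both disconnect and suspend.
 */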
static void dln2_stop(struct dln2_dev *dln2)
{
	int i, j;

	/* don't allow starting new transfers */
	spin_lock(&dln2->disconnect_lock);
	dln2->disconnect = true;
	spin_unlock(&dln2->disconnect_lock);

	/* cancel in-progress transfers */
	for (i = 0; i < DLN2_HANDLES; i++) {
		struct dln2_mod_rx_slots *rxs = &dln2->mod_rx_slots[i];
		unsigned long flags;

		spin_lock_irqsave(&rxs->lock, flags);

		/* cancel all response waiters */
		for (j = 0; j < DLN2_MAX_RX_SLOTS; j++) {
			struct dln2_rx_context *rxc = &rxs->slots[j];

			if (rxc->in_use)
				complete(&rxc->done);
		}

		spin_unlock_irqrestore(&rxs->lock, flags);
	}

	/* wait for transfers to end */
	wait_event(dln2->disconnect_wq, !dln2->active_transfers);

	dln2_stop_rx_urbs(dln2);
}

static void dln2_disconnect(struct usb_interface *interface)
{
	struct dln2_dev *dln2 = usb_get_intfdata(interface);

	dln2_stop(dln2);

	mfd_remove_devices(&interface->dev);

	dln2_free(dln2);
}

static int dln2_probe(struct usb_interface *interface,
		      const struct usb_device_id *usb_id)
{
	struct usb_host_interface *hostif = interface->cur_altsetting;
	struct usb_endpoint_descriptor *epin;
	struct usb_endpoint_descriptor *epout;
	struct device *dev = &interface->dev;
	struct dln2_dev *dln2;
	int ret;
	int i, j;

	if (hostif->desc.bInterfaceNumber != 0 ||
	    hostif->desc.bNumEndpoints < 2)
		return -ENODEV;

	epout = &hostif->endpoint[DLN2_EP_OUT].desc;
	if (!usb_endpoint_is_bulk_out(epout))
		return -ENODEV;
	epin = &hostif->endpoint[DLN2_EP_IN].desc;
	if (!usb_endpoint_is_bulk_in(epin))
		return -ENODEV;

	dln2 = kzalloc(sizeof(*dln2), GFP_KERNEL);
	if (!dln2)
		return -ENOMEM;

	dln2->ep_out = epout->bEndpointAddress;
	dln2->ep_in = epin->bEndpointAddress;
	dln2->usb_dev = usb_get_dev(interface_to_usbdev(interface));
	dln2->interface = interface;
	usb_set_intfdata(interface, dln2);
	init_waitqueue_head(&dln2->disconnect_wq);

	for (i = 0; i < DLN2_HANDLES; i++) {
		init_waitqueue_head(&dln2->mod_rx_slots[i].wq);
		spin_lock_init(&dln2->mod_rx_slots[i].lock);
		for (j = 0; j < DLN2_MAX_RX_SLOTS; j++)
			init_completion(&dln2->mod_rx_slots[i].slots[j].done);
	}

	spin_lock_init(&dln2->event_cb_lock);
	spin_lock_init(&dln2->disconnect_lock);
	INIT_LIST_HEAD(&dln2->event_cb_list);

	ret = dln2_setup_rx_urbs(dln2, hostif);
	if (ret)
		goto out_free;

	ret = dln2_start_rx_urbs(dln2, GFP_KERNEL);
	if (ret)
		goto out_stop_rx;

	ret = dln2_hw_init(dln2);
	if (ret < 0) {
		dev_err(dev, "failed to initialize hardware\n");
		goto out_stop_rx;
	}

	ret = mfd_add_hotplug_devices(dev, dln2_devs, ARRAY_SIZE(dln2_devs));
	if (ret != 0) {
		dev_err(dev, "failed to add mfd devices to core\n");
		goto out_stop_rx;
	}

	return 0;

out_stop_rx:
	dln2_stop_rx_urbs(dln2);

out_free:
	dln2_free(dln2);

	return ret;
}

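/*
 * Suspend stops all I/O via dln2_stop(); dln2_resume() clears the disconnect
 * flag and resubmits the RX URBs.
 */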
static int dln2_suspend(struct usb_interface *iface, pm_message_t message)
{
	struct dln2_dev *dln2 = usb_get_intfdata(iface);

	dln2_stop(dln2);

	return 0;
}

static int dln2_resume(struct usb_interface *iface)
{
	struct dln2_dev *dln2 = usb_get_intfdata(iface);

	dln2->disconnect = false;

	return dln2_start_rx_urbs(dln2, GFP_NOIO);
}

static const struct usb_device_id dln2_table[] = {
	{ USB_DEVICE(0xa257, 0x2013) },
	{ }
};

MODULE_DEVICE_TABLE(usb, dln2_table);

static struct usb_driver dln2_driver = {
	.name = "dln2",
	.probe = dln2_probe,
	.disconnect = dln2_disconnect,
	.id_table = dln2_table,
	.suspend = dln2_suspend,
	.resume = dln2_resume,
};

module_usb_driver(dln2_driver);

MODULE_AUTHOR("Octavian Purdila <octavian.purdila@intel.com>");
MODULE_DESCRIPTION("Core driver for the Diolan DLN2 interface adapter");
MODULE_LICENSE("GPL v2");