// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/mei_cl_bus.h>

#include "mei_dev.h"
#include "client.h"

#define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver)

/**
 * __mei_cl_send - internal client send (write)
 *
 * @cl: host client
 * @buf: buffer to send
 * @length: buffer length
 * @vtag: virtual tag
 * @mode: sending mode
 *
 * Return: written size in bytes or < 0 on error
 */
ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
		      unsigned int mode)
{
	struct mei_device *bus;
	struct mei_cl_cb *cb;
	ssize_t rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	bus = cl->dev;

	mutex_lock(&bus->device_lock);
	if (bus->dev_state != MEI_DEV_ENABLED &&
	    bus->dev_state != MEI_DEV_POWERING_DOWN) {
		rets = -ENODEV;
		goto out;
	}

	if (!mei_cl_is_connected(cl)) {
		rets = -ENODEV;
		goto out;
	}

	/* Check if we have an ME client device */
	if (!mei_me_cl_is_active(cl->me_cl)) {
		rets = -ENOTTY;
		goto out;
	}

	if (vtag) {
		/* Check if vtag is supported by client */
		rets = mei_cl_vt_support_check(cl);
		if (rets)
			goto out;
	}

	if (length > mei_cl_mtu(cl)) {
		rets = -EFBIG;
		goto out;
	}

	while (cl->tx_cb_queued >= bus->tx_queue_limit) {
		mutex_unlock(&bus->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
						cl->writing_state == MEI_WRITE_COMPLETE ||
						(!mei_cl_is_connected(cl)));
		mutex_lock(&bus->device_lock);
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto out;
		}
		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}

	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}
	cb->vtag = vtag;

	cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL);
	cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING);
	memcpy(cb->buf.data, buf, length);

	rets = mei_cl_write(cl, cb);

out:
	mutex_unlock(&bus->device_lock);

	return rets;
}

/**
 * __mei_cl_recv - internal client receive (read)
 *
 * @cl: host client
 * @buf: buffer to receive
 * @length: buffer length
 * @mode: io mode
 * @vtag: virtual tag
 * @timeout: recv timeout, 0 for infinite timeout
 *
 * Return: read size in bytes or < 0 on error
 */
ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag,
		      unsigned int mode, unsigned long timeout)
{
	struct mei_device *bus;
	struct mei_cl_cb *cb;
	size_t r_length;
	ssize_t rets;
	bool nonblock = !!(mode & MEI_CL_IO_RX_NONBLOCK);

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	bus = cl->dev;

	mutex_lock(&bus->device_lock);
	if (bus->dev_state != MEI_DEV_ENABLED &&
	    bus->dev_state != MEI_DEV_POWERING_DOWN) {
		rets = -ENODEV;
		goto out;
	}

	cb = mei_cl_read_cb(cl, NULL);
	if (cb)
		goto copy;

	rets = mei_cl_read_start(cl, length, NULL);
	if (rets && rets != -EBUSY)
		goto out;

	if (nonblock) {
		rets = -EAGAIN;
		goto out;
	}

	/* wait on event only if there is no other waiter */
	/* synchronized under device mutex */
	if (!waitqueue_active(&cl->rx_wait)) {

		mutex_unlock(&bus->device_lock);

		if (timeout) {
			rets = wait_event_interruptible_timeout
					(cl->rx_wait,
					 mei_cl_read_cb(cl, NULL) ||
					 (!mei_cl_is_connected(cl)),
					 msecs_to_jiffies(timeout));
			if (rets == 0)
				return -ETIME;
			if (rets < 0) {
				if (signal_pending(current))
					return -EINTR;
				return -ERESTARTSYS;
			}
		} else {
			if (wait_event_interruptible
					(cl->rx_wait,
					 mei_cl_read_cb(cl, NULL) ||
					 (!mei_cl_is_connected(cl)))) {
				if (signal_pending(current))
					return -EINTR;
				return -ERESTARTSYS;
			}
		}

		mutex_lock(&bus->device_lock);

		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}

	cb = mei_cl_read_cb(cl, NULL);
	if (!cb) {
		rets = 0;
		goto out;
	}

copy:
	if (cb->status) {
		rets = cb->status;
		goto free;
	}

	r_length = min_t(size_t, length, cb->buf_idx);
	memcpy(buf, cb->buf.data, r_length);
	rets = r_length;
	if (vtag)
		*vtag = cb->vtag;

free:
	mei_cl_del_rd_completed(cl, cb);
out:
	mutex_unlock(&bus->device_lock);

	return rets;
}

/**
 * mei_cldev_send_vtag - me device send with vtag (write)
 *
 * @cldev: me client device
 * @buf: buffer to send
 * @length: buffer length
 * @vtag: virtual tag
 *
 * Return:
 * * written size in bytes
 * * < 0 on error
 */

ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
			    size_t length, u8 vtag)
{
	struct mei_cl *cl = cldev->cl;

	return __mei_cl_send(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING);
}
EXPORT_SYMBOL_GPL(mei_cldev_send_vtag);

/**
 * mei_cldev_recv_vtag - client receive with vtag (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 * @vtag: virtual tag
 *
 * Return:
 * * read size in bytes
 * * < 0 on error
 */

ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
			    u8 *vtag)
{
	struct mei_cl *cl = cldev->cl;

	return __mei_cl_recv(cl, buf, length, vtag, 0, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag);

/**
 * mei_cldev_recv_nonblock_vtag - non block client receive with vtag (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 * @vtag: virtual tag
 *
 * Return:
 * * read size in bytes
 * * -EAGAIN if function will block.
 * * < 0 on other error
 */
ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
				     size_t length, u8 *vtag)
{
	struct mei_cl *cl = cldev->cl;

	return __mei_cl_recv(cl, buf, length, vtag, MEI_CL_IO_RX_NONBLOCK, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock_vtag);

/**
 * mei_cldev_send - me device send (write)
 *
 * @cldev: me client device
 * @buf: buffer to send
 * @length: buffer length
 *
 * Return:
 * * written size in bytes
 * * < 0 on error
 */
ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, size_t length)
{
	return mei_cldev_send_vtag(cldev, buf, length, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_send);
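
/*
 * Usage sketch (illustrative, not part of this driver): a client driver
 * on the mei bus typically sends a small protocol-specific request with
 * mei_cldev_send(); the command payload below is hypothetical.
 *
 *	static int example_send_cmd(struct mei_cl_device *cldev)
 *	{
 *		const u8 cmd[] = { 0x01, 0x00 };
 *		ssize_t ret;
 *
 *		ret = mei_cldev_send(cldev, cmd, sizeof(cmd));
 *		if (ret < 0)
 *			dev_err(&cldev->dev, "send failed: %zd\n", ret);
 *
 *		return ret < 0 ? ret : 0;
 *	}
 */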

/**
 * mei_cldev_recv - client receive (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 *
 * Return: read size in bytes or < 0 on error
 */
ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
{
	return mei_cldev_recv_vtag(cldev, buf, length, NULL);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv);
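
/*
 * Usage sketch (illustrative): reading a response into a caller-supplied
 * buffer; the buffer size is an assumption of the example, a real driver
 * sizes it according to its protocol.
 *
 *	static ssize_t example_recv_rsp(struct mei_cl_device *cldev,
 *					u8 *rsp, size_t len)
 *	{
 *		ssize_t ret;
 *
 *		ret = mei_cldev_recv(cldev, rsp, len);
 *		if (ret < 0)
 *			dev_err(&cldev->dev, "recv failed: %zd\n", ret);
 *		return ret;
 *	}
 */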

/**
 * mei_cldev_recv_nonblock - non block client receive (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 *
 * Return: read size in bytes or < 0 on error
 *         -EAGAIN if function will block.
 */
ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
				size_t length)
{
	return mei_cldev_recv_nonblock_vtag(cldev, buf, length, NULL);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock);
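
/*
 * Usage sketch (illustrative fragment): a non-blocking read treats
 * -EAGAIN as "no data yet" and retries later, e.g. from an Rx callback.
 *
 *	ret = mei_cldev_recv_nonblock(cldev, buf, sizeof(buf));
 *	if (ret == -EAGAIN)
 *		return;
 *	if (ret < 0)
 *		dev_err(&cldev->dev, "recv failed: %zd\n", ret);
 */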

/**
 * mei_cl_bus_rx_work - dispatch rx event for a bus device
 *
 * @work: work
 */
static void mei_cl_bus_rx_work(struct work_struct *work)
{
	struct mei_cl_device *cldev;
	struct mei_device *bus;

	cldev = container_of(work, struct mei_cl_device, rx_work);

	bus = cldev->bus;

	if (cldev->rx_cb)
		cldev->rx_cb(cldev);

	mutex_lock(&bus->device_lock);
	if (mei_cl_is_connected(cldev->cl))
		mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
	mutex_unlock(&bus->device_lock);
}

/**
 * mei_cl_bus_notif_work - dispatch FW notif event for a bus device
 *
 * @work: work
 */
static void mei_cl_bus_notif_work(struct work_struct *work)
{
	struct mei_cl_device *cldev;

	cldev = container_of(work, struct mei_cl_device, notif_work);

	if (cldev->notif_cb)
		cldev->notif_cb(cldev);
}

/**
 * mei_cl_bus_notify_event - schedule notify cb on bus client
 *
 * @cl: host client
 *
 * Return: true if event was scheduled
 *         false if the client is not waiting for event
 */
bool mei_cl_bus_notify_event(struct mei_cl *cl)
{
	struct mei_cl_device *cldev = cl->cldev;

	if (!cldev || !cldev->notif_cb)
		return false;

	if (!cl->notify_ev)
		return false;

	schedule_work(&cldev->notif_work);

	cl->notify_ev = false;

	return true;
}

/**
 * mei_cl_bus_rx_event - schedule rx event
 *
 * @cl: host client
 *
 * Return: true if event was scheduled
 *         false if the client is not waiting for event
 */
bool mei_cl_bus_rx_event(struct mei_cl *cl)
{
	struct mei_cl_device *cldev = cl->cldev;

	if (!cldev || !cldev->rx_cb)
		return false;

	schedule_work(&cldev->rx_work);

	return true;
}

/**
 * mei_cldev_register_rx_cb - register Rx event callback
 *
 * @cldev: me client device
 * @rx_cb: callback function
 *
 * Return: 0 on success
 *         -EALREADY if a callback is already registered
 *         <0 on other errors
 */
int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb)
{
	struct mei_device *bus = cldev->bus;
	int ret;

	if (!rx_cb)
		return -EINVAL;
	if (cldev->rx_cb)
		return -EALREADY;

	cldev->rx_cb = rx_cb;
	INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work);

	mutex_lock(&bus->device_lock);
	if (mei_cl_is_connected(cldev->cl))
		ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
	else
		ret = -ENODEV;
	mutex_unlock(&bus->device_lock);
	if (ret && ret != -EBUSY) {
		cancel_work_sync(&cldev->rx_work);
		cldev->rx_cb = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mei_cldev_register_rx_cb);
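
/*
 * Usage sketch (illustrative): a client driver usually registers its Rx
 * callback right after mei_cldev_enable() in probe; the callback then
 * drains the data with one of the receive helpers above.
 * example_process() is a hypothetical handler.
 *
 *	static void example_rx_cb(struct mei_cl_device *cldev)
 *	{
 *		u8 buf[64];
 *		ssize_t ret;
 *
 *		ret = mei_cldev_recv_nonblock(cldev, buf, sizeof(buf));
 *		if (ret > 0)
 *			example_process(cldev, buf, ret);
 *	}
 *
 * and in probe: ret = mei_cldev_register_rx_cb(cldev, example_rx_cb);
 */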

/**
 * mei_cldev_register_notif_cb - register FW notification event callback
 *
 * @cldev: me client device
 * @notif_cb: callback function
 *
 * Return: 0 on success
 *         -EALREADY if a callback is already registered
 *         <0 on other errors
 */
int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
				mei_cldev_cb_t notif_cb)
{
	struct mei_device *bus = cldev->bus;
	int ret;

	if (!notif_cb)
		return -EINVAL;

	if (cldev->notif_cb)
		return -EALREADY;

	cldev->notif_cb = notif_cb;
	INIT_WORK(&cldev->notif_work, mei_cl_bus_notif_work);

	mutex_lock(&bus->device_lock);
	ret = mei_cl_notify_request(cldev->cl, NULL, 1);
	mutex_unlock(&bus->device_lock);
	if (ret) {
		cancel_work_sync(&cldev->notif_work);
		cldev->notif_cb = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mei_cldev_register_notif_cb);

/**
 * mei_cldev_get_drvdata - driver data getter
 *
 * @cldev: mei client device
 *
 * Return: driver private data
 */
void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev)
{
	return dev_get_drvdata(&cldev->dev);
}
EXPORT_SYMBOL_GPL(mei_cldev_get_drvdata);

/**
 * mei_cldev_set_drvdata - driver data setter
 *
 * @cldev: mei client device
 * @data: data to store
 */
void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data)
{
	dev_set_drvdata(&cldev->dev, data);
}
EXPORT_SYMBOL_GPL(mei_cldev_set_drvdata);
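
/*
 * Usage sketch (illustrative): per-device driver state is typically
 * allocated in probe, stored with mei_cldev_set_drvdata() and looked up
 * again in callbacks and remove; struct example_dev is hypothetical.
 *
 *	struct example_dev *edev;
 *
 *	edev = devm_kzalloc(&cldev->dev, sizeof(*edev), GFP_KERNEL);
 *	if (!edev)
 *		return -ENOMEM;
 *	mei_cldev_set_drvdata(cldev, edev);
 *
 * and later:
 *
 *	struct example_dev *edev = mei_cldev_get_drvdata(cldev);
 */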

/**
 * mei_cldev_uuid - return uuid of the underlying me client
 *
 * @cldev: mei client device
 *
 * Return: me client uuid
 */
const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev)
{
	return mei_me_cl_uuid(cldev->me_cl);
}
EXPORT_SYMBOL_GPL(mei_cldev_uuid);

/**
 * mei_cldev_ver - return protocol version of the underlying me client
 *
 * @cldev: mei client device
 *
 * Return: me client protocol version
 */
u8 mei_cldev_ver(const struct mei_cl_device *cldev)
{
	return mei_me_cl_ver(cldev->me_cl);
}
EXPORT_SYMBOL_GPL(mei_cldev_ver);

/**
 * mei_cldev_enabled - check whether the device is enabled
 *
 * @cldev: mei client device
 *
 * Return: true if me client is initialized and connected
 */
bool mei_cldev_enabled(const struct mei_cl_device *cldev)
{
	return mei_cl_is_connected(cldev->cl);
}
EXPORT_SYMBOL_GPL(mei_cldev_enabled);

/**
 * mei_cl_bus_module_get - acquire module of the underlying
 *    hw driver.
 *
 * @cldev: mei client device
 *
 * Return: true on success; false if the module was removed.
 */
static bool mei_cl_bus_module_get(struct mei_cl_device *cldev)
{
	return try_module_get(cldev->bus->dev->driver->owner);
}

/**
 * mei_cl_bus_module_put - release the underlying hw module.
 *
 * @cldev: mei client device
 */
static void mei_cl_bus_module_put(struct mei_cl_device *cldev)
{
	module_put(cldev->bus->dev->driver->owner);
}

/**
 * mei_cl_bus_vtag - get bus vtag entry wrapper
 *	The tag for bus client is always first.
 *
 * @cl: host client
 *
 * Return: bus vtag or NULL
 */
static inline struct mei_cl_vtag *mei_cl_bus_vtag(struct mei_cl *cl)
{
	return list_first_entry_or_null(&cl->vtag_map,
					struct mei_cl_vtag, list);
}

/**
 * mei_cl_bus_vtag_alloc - add bus client entry to vtag map
 *
 * @cldev: me client device
 *
 * Return:
 * * 0 on success
 * * -ENOMEM if memory allocation failed
 */
static int mei_cl_bus_vtag_alloc(struct mei_cl_device *cldev)
{
	struct mei_cl *cl = cldev->cl;
	struct mei_cl_vtag *cl_vtag;

	/*
	 * Bail out if the client does not support vtags
	 * or has already allocated one
	 */
	if (mei_cl_vt_support_check(cl) || mei_cl_bus_vtag(cl))
		return 0;

	cl_vtag = mei_cl_vtag_alloc(NULL, 0);
	if (IS_ERR(cl_vtag))
		return -ENOMEM;

	list_add_tail(&cl_vtag->list, &cl->vtag_map);

	return 0;
}

/**
 * mei_cl_bus_vtag_free - remove the bus entry from vtag map
 *
 * @cldev: me client device
 */
static void mei_cl_bus_vtag_free(struct mei_cl_device *cldev)
{
	struct mei_cl *cl = cldev->cl;
	struct mei_cl_vtag *cl_vtag;

	cl_vtag = mei_cl_bus_vtag(cl);
	if (!cl_vtag)
		return;

	list_del(&cl_vtag->list);
	kfree(cl_vtag);
}

void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int ret;

	if (!cldev || !buffer_id || !size)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(size, MEI_FW_PAGE_SIZE)) {
		dev_err(&cldev->dev, "Map size should be aligned to %lu\n",
			MEI_FW_PAGE_SIZE);
		return ERR_PTR(-EINVAL);
	}

	cl = cldev->cl;
	bus = cldev->bus;

	mutex_lock(&bus->device_lock);
	if (cl->state == MEI_FILE_UNINITIALIZED) {
		ret = mei_cl_link(cl);
		if (ret)
			goto notlinked;
		/* update pointers */
		cl->cldev = cldev;
	}

	ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size);
	if (ret)
		mei_cl_unlink(cl);
notlinked:
	mutex_unlock(&bus->device_lock);
	if (ret)
		return ERR_PTR(ret);
	return cl->dma.vaddr;
}
EXPORT_SYMBOL_GPL(mei_cldev_dma_map);

int mei_cldev_dma_unmap(struct mei_cl_device *cldev)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int ret;

	if (!cldev)
		return -EINVAL;

	cl = cldev->cl;
	bus = cldev->bus;

	mutex_lock(&bus->device_lock);
	ret = mei_cl_dma_unmap(cl, NULL);

	mei_cl_flush_queues(cl, NULL);
	mei_cl_unlink(cl);
	mutex_unlock(&bus->device_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(mei_cldev_dma_unmap);
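
/*
 * Usage sketch (illustrative): one possible ordering for a client that
 * uses a DMA ring buffer, assuming the firmware client supports it.
 * EXAMPLE_BUF_ID and the 16k size are placeholders; the size must be a
 * multiple of MEI_FW_PAGE_SIZE.
 *
 *	vaddr = mei_cldev_dma_map(cldev, EXAMPLE_BUF_ID, SZ_16K);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	ret = mei_cldev_enable(cldev);
 *	...
 *	mei_cldev_disable(cldev);
 *	mei_cldev_dma_unmap(cldev);
 */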

/**
 * mei_cldev_enable - enable me client device
 *     create connection with me client
 *
 * @cldev: me client device
 *
 * Return: 0 on success and < 0 on error
 */
int mei_cldev_enable(struct mei_cl_device *cldev)
{
	struct mei_device *bus = cldev->bus;
	struct mei_cl *cl;
	int ret;

	cl = cldev->cl;

	mutex_lock(&bus->device_lock);
	if (cl->state == MEI_FILE_UNINITIALIZED) {
		ret = mei_cl_link(cl);
		if (ret)
			goto notlinked;
		/* update pointers */
		cl->cldev = cldev;
	}

	if (mei_cl_is_connected(cl)) {
		ret = 0;
		goto out;
	}

	if (!mei_me_cl_is_active(cldev->me_cl)) {
		dev_err(&cldev->dev, "me client is not active\n");
		ret = -ENOTTY;
		goto out;
	}

	ret = mei_cl_bus_vtag_alloc(cldev);
	if (ret)
		goto out;

	ret = mei_cl_connect(cl, cldev->me_cl, NULL);
	if (ret < 0) {
		dev_err(&cldev->dev, "cannot connect\n");
		mei_cl_bus_vtag_free(cldev);
	}

out:
	if (ret)
		mei_cl_unlink(cl);
notlinked:
	mutex_unlock(&bus->device_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mei_cldev_enable);
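
/*
 * Usage sketch (illustrative): enabling the device is normally the first
 * step of a client driver probe; example_rx_cb() is the hypothetical
 * callback from the sketch above.
 *
 *	static int example_probe(struct mei_cl_device *cldev,
 *				 const struct mei_cl_device_id *id)
 *	{
 *		int ret;
 *
 *		ret = mei_cldev_enable(cldev);
 *		if (ret < 0) {
 *			dev_err(&cldev->dev, "cannot enable: %d\n", ret);
 *			return ret;
 *		}
 *
 *		return mei_cldev_register_rx_cb(cldev, example_rx_cb);
 *	}
 */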

/**
 * mei_cldev_unregister_callbacks - internal wrapper for unregistering
 *  callbacks.
 *
 * @cldev: client device
 */
static void mei_cldev_unregister_callbacks(struct mei_cl_device *cldev)
{
	if (cldev->rx_cb) {
		cancel_work_sync(&cldev->rx_work);
		cldev->rx_cb = NULL;
	}

	if (cldev->notif_cb) {
		cancel_work_sync(&cldev->notif_work);
		cldev->notif_cb = NULL;
	}
}

/**
 * mei_cldev_disable - disable me client device
 *     disconnect from the me client
 *
 * @cldev: me client device
 *
 * Return: 0 on success and < 0 on error
 */
int mei_cldev_disable(struct mei_cl_device *cldev)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int err;

	if (!cldev)
		return -ENODEV;

	cl = cldev->cl;

	bus = cldev->bus;

	mei_cldev_unregister_callbacks(cldev);

	mutex_lock(&bus->device_lock);

	mei_cl_bus_vtag_free(cldev);

	if (!mei_cl_is_connected(cl)) {
		dev_dbg(bus->dev, "Already disconnected\n");
		err = 0;
		goto out;
	}

	err = mei_cl_disconnect(cl);
	if (err < 0)
		dev_err(bus->dev, "Could not disconnect from the ME client\n");

out:
	/* Flush queues and remove any pending read unless we have mapped DMA */
	if (!cl->dma_mapped) {
		mei_cl_flush_queues(cl, NULL);
		mei_cl_unlink(cl);
	}

	mutex_unlock(&bus->device_lock);
	return err;
}
EXPORT_SYMBOL_GPL(mei_cldev_disable);
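
/*
 * Usage sketch (illustrative): the matching teardown in a client driver
 * remove callback.
 *
 *	static void example_remove(struct mei_cl_device *cldev)
 *	{
 *		mei_cldev_disable(cldev);
 *	}
 */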

/**
 * mei_cl_device_find - find matching entry in the driver id table
 *
 * @cldev: me client device
 * @cldrv: me client driver
 *
 * Return: id on success; NULL if no id is matching
 */
static const
struct mei_cl_device_id *mei_cl_device_find(const struct mei_cl_device *cldev,
					    const struct mei_cl_driver *cldrv)
{
	const struct mei_cl_device_id *id;
	const uuid_le *uuid;
	u8 version;
	bool match;

	uuid = mei_me_cl_uuid(cldev->me_cl);
	version = mei_me_cl_ver(cldev->me_cl);

	id = cldrv->id_table;
	while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
		if (!uuid_le_cmp(*uuid, id->uuid)) {
			match = true;

			if (cldev->name[0])
				if (strncmp(cldev->name, id->name,
					    sizeof(id->name)))
					match = false;

			if (id->version != MEI_CL_VERSION_ANY)
				if (id->version != version)
					match = false;
			if (match)
				return id;
		}

		id++;
	}

	return NULL;
}

/**
 * mei_cl_device_match - device match function
 *
 * @dev: device
 * @drv: driver
 *
 * Return: 1 if matching device was found 0 otherwise
 */
static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
{
	const struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const struct mei_cl_driver *cldrv = to_mei_cl_driver(drv);
	const struct mei_cl_device_id *found_id;

	if (!cldev)
		return 0;

	if (!cldev->do_match)
		return 0;

	if (!cldrv || !cldrv->id_table)
		return 0;

	found_id = mei_cl_device_find(cldev, cldrv);
	if (found_id)
		return 1;

	return 0;
}

/**
 * mei_cl_device_probe - bus probe function
 *
 * @dev: device
 *
 * Return: 0 on success; < 0 otherwise
 */
static int mei_cl_device_probe(struct device *dev)
{
	struct mei_cl_device *cldev;
	struct mei_cl_driver *cldrv;
	const struct mei_cl_device_id *id;
	int ret;

	cldev = to_mei_cl_device(dev);
	cldrv = to_mei_cl_driver(dev->driver);

	if (!cldev)
		return 0;

	if (!cldrv || !cldrv->probe)
		return -ENODEV;

	id = mei_cl_device_find(cldev, cldrv);
	if (!id)
		return -ENODEV;

	if (!mei_cl_bus_module_get(cldev)) {
		dev_err(&cldev->dev, "get hw module failed");
		return -ENODEV;
	}

	ret = cldrv->probe(cldev, id);
	if (ret) {
		mei_cl_bus_module_put(cldev);
		return ret;
	}

	__module_get(THIS_MODULE);
	return 0;
}

/**
 * mei_cl_device_remove - remove device from the bus
 *
 * @dev: device
 */
static void mei_cl_device_remove(struct device *dev)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	struct mei_cl_driver *cldrv = to_mei_cl_driver(dev->driver);

	if (cldrv->remove)
		cldrv->remove(cldev);

	mei_cldev_unregister_callbacks(cldev);

	mei_cl_bus_module_put(cldev);
	module_put(THIS_MODULE);
}

static ssize_t name_show(struct device *dev, struct device_attribute *a,
			 char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);

	return scnprintf(buf, PAGE_SIZE, "%s", cldev->name);
}
static DEVICE_ATTR_RO(name);

static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
			 char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);

	return sprintf(buf, "%pUl", uuid);
}
static DEVICE_ATTR_RO(uuid);

static ssize_t version_show(struct device *dev, struct device_attribute *a,
			    char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	u8 version = mei_me_cl_ver(cldev->me_cl);

	return sprintf(buf, "%02X", version);
}
static DEVICE_ATTR_RO(version);

static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
	u8 version = mei_me_cl_ver(cldev->me_cl);

	return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:",
			 cldev->name, uuid, version);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t max_conn_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	u8 maxconn = mei_me_cl_max_conn(cldev->me_cl);

	return sprintf(buf, "%d", maxconn);
}
static DEVICE_ATTR_RO(max_conn);

static ssize_t fixed_show(struct device *dev, struct device_attribute *a,
			  char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	u8 fixed = mei_me_cl_fixed(cldev->me_cl);

	return sprintf(buf, "%d", fixed);
}
static DEVICE_ATTR_RO(fixed);

static ssize_t vtag_show(struct device *dev, struct device_attribute *a,
			 char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	bool vt = mei_me_cl_vt(cldev->me_cl);

	return sprintf(buf, "%d", vt);
}
static DEVICE_ATTR_RO(vtag);

static ssize_t max_len_show(struct device *dev, struct device_attribute *a,
			    char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	u32 maxlen = mei_me_cl_max_len(cldev->me_cl);

	return sprintf(buf, "%u", maxlen);
}
static DEVICE_ATTR_RO(max_len);

static struct attribute *mei_cldev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_uuid.attr,
	&dev_attr_version.attr,
	&dev_attr_modalias.attr,
	&dev_attr_max_conn.attr,
	&dev_attr_fixed.attr,
	&dev_attr_vtag.attr,
	&dev_attr_max_len.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mei_cldev);

/**
 * mei_cl_device_uevent - me client bus uevent handler
 *
 * @dev: device
 * @env: uevent kobject
 *
 * Return: 0 on success, -ENOMEM when add_uevent_var fails
 */
static int mei_cl_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
	u8 version = mei_me_cl_ver(cldev->me_cl);

	if (add_uevent_var(env, "MEI_CL_VERSION=%d", version))
		return -ENOMEM;

	if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
		return -ENOMEM;

	if (add_uevent_var(env, "MEI_CL_NAME=%s", cldev->name))
		return -ENOMEM;

	if (add_uevent_var(env, "MODALIAS=mei:%s:%pUl:%02X:",
			   cldev->name, uuid, version))
		return -ENOMEM;

	return 0;
}

static struct bus_type mei_cl_bus_type = {
	.name		= "mei",
	.dev_groups	= mei_cldev_groups,
	.match		= mei_cl_device_match,
	.probe		= mei_cl_device_probe,
	.remove		= mei_cl_device_remove,
	.uevent		= mei_cl_device_uevent,
};

static struct mei_device *mei_dev_bus_get(struct mei_device *bus)
{
	if (bus)
		get_device(bus->dev);

	return bus;
}

static void mei_dev_bus_put(struct mei_device *bus)
{
	if (bus)
		put_device(bus->dev);
}

static void mei_cl_bus_dev_release(struct device *dev)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);

	if (!cldev)
		return;

	mei_cl_flush_queues(cldev->cl, NULL);
	mei_me_cl_put(cldev->me_cl);
	mei_dev_bus_put(cldev->bus);
	kfree(cldev->cl);
	kfree(cldev);
}

static const struct device_type mei_cl_device_type = {
	.release = mei_cl_bus_dev_release,
};

/**
 * mei_cl_bus_set_name - set device name for me client device
 *  <controller>-<client device>
 *  Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb
 *
 * @cldev: me client device
 */
static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
{
	dev_set_name(&cldev->dev, "%s-%pUl",
		     dev_name(cldev->bus->dev),
		     mei_me_cl_uuid(cldev->me_cl));
}

/**
 * mei_cl_bus_dev_alloc - initialize and allocate mei client device
 *
 * @bus: mei device
 * @me_cl: me client
 *
 * Return: allocated device structure or NULL on allocation failure
 */
static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
						  struct mei_me_client *me_cl)
{
	struct mei_cl_device *cldev;
	struct mei_cl *cl;

	cldev = kzalloc(sizeof(*cldev), GFP_KERNEL);
	if (!cldev)
		return NULL;

	cl = mei_cl_allocate(bus);
	if (!cl) {
		kfree(cldev);
		return NULL;
	}

	device_initialize(&cldev->dev);
	cldev->dev.parent = bus->dev;
	cldev->dev.bus = &mei_cl_bus_type;
	cldev->dev.type = &mei_cl_device_type;
	cldev->bus = mei_dev_bus_get(bus);
	cldev->me_cl = mei_me_cl_get(me_cl);
	cldev->cl = cl;
	mei_cl_bus_set_name(cldev);
	cldev->is_added = 0;
	INIT_LIST_HEAD(&cldev->bus_list);

	return cldev;
}

/**
 * mei_cl_bus_dev_setup - setup me client device
 *    run fix up routines and set the device name
 *
 * @bus: mei device
 * @cldev: me client device
 *
 * Return: true if the device is eligible for enumeration
 */
static bool mei_cl_bus_dev_setup(struct mei_device *bus,
				 struct mei_cl_device *cldev)
{
	cldev->do_match = 1;
	mei_cl_bus_dev_fixup(cldev);

	/* the device name can change during fix up */
	if (cldev->do_match)
		mei_cl_bus_set_name(cldev);

	return cldev->do_match == 1;
}

/**
 * mei_cl_bus_dev_add - add me client devices
 *
 * @cldev: me client device
 *
 * Return: 0 on success; < 0 on failure
 */
static int mei_cl_bus_dev_add(struct mei_cl_device *cldev)
{
	int ret;

	dev_dbg(cldev->bus->dev, "adding %pUL:%02X\n",
		mei_me_cl_uuid(cldev->me_cl),
		mei_me_cl_ver(cldev->me_cl));
	ret = device_add(&cldev->dev);
	if (!ret)
		cldev->is_added = 1;

	return ret;
}

/**
 * mei_cl_bus_dev_stop - stop the driver
 *
 * @cldev: me client device
 */
static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev)
{
	if (cldev->is_added)
		device_release_driver(&cldev->dev);
}

/**
 * mei_cl_bus_dev_destroy - destroy me client devices object
 *
 * @cldev: me client device
 *
 * Locking: called under "dev->cl_bus_lock" lock
 */
static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev)
{

	WARN_ON(!mutex_is_locked(&cldev->bus->cl_bus_lock));

	if (!cldev->is_added)
		return;

	device_del(&cldev->dev);

	list_del_init(&cldev->bus_list);

	cldev->is_added = 0;
	put_device(&cldev->dev);
}

/**
 * mei_cl_bus_remove_device - remove a device from the bus
 *
 * @cldev: me client device
 */
static void mei_cl_bus_remove_device(struct mei_cl_device *cldev)
{
	mei_cl_bus_dev_stop(cldev);
	mei_cl_bus_dev_destroy(cldev);
}

/**
 * mei_cl_bus_remove_devices - remove all devices from the bus
 *
 * @bus: mei device
 */
void mei_cl_bus_remove_devices(struct mei_device *bus)
{
	struct mei_cl_device *cldev, *next;

	mutex_lock(&bus->cl_bus_lock);
	list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list)
		mei_cl_bus_remove_device(cldev);
	mutex_unlock(&bus->cl_bus_lock);
}


/**
 * mei_cl_bus_dev_init - allocate and initialize a mei client device
 *     based on me client
 *
 * @bus: mei device
 * @me_cl: me client
 *
 * Locking: called under "dev->cl_bus_lock" lock
 */
static void mei_cl_bus_dev_init(struct mei_device *bus,
				struct mei_me_client *me_cl)
{
	struct mei_cl_device *cldev;

	WARN_ON(!mutex_is_locked(&bus->cl_bus_lock));

	dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));

	if (me_cl->bus_added)
		return;

	cldev = mei_cl_bus_dev_alloc(bus, me_cl);
	if (!cldev)
		return;

	me_cl->bus_added = true;
	list_add_tail(&cldev->bus_list, &bus->device_list);

}

/**
 * mei_cl_bus_rescan - scan me clients list and create
 *     devices for eligible clients
 *
 * @bus: mei device
 */
static void mei_cl_bus_rescan(struct mei_device *bus)
{
	struct mei_cl_device *cldev, *n;
	struct mei_me_client *me_cl;

	mutex_lock(&bus->cl_bus_lock);

	down_read(&bus->me_clients_rwsem);
	list_for_each_entry(me_cl, &bus->me_clients, list)
		mei_cl_bus_dev_init(bus, me_cl);
	up_read(&bus->me_clients_rwsem);

	list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) {

		if (!mei_me_cl_is_active(cldev->me_cl)) {
			mei_cl_bus_remove_device(cldev);
			continue;
		}

		if (cldev->is_added)
			continue;

		if (mei_cl_bus_dev_setup(bus, cldev))
			mei_cl_bus_dev_add(cldev);
		else {
			list_del_init(&cldev->bus_list);
			put_device(&cldev->dev);
		}
	}
	mutex_unlock(&bus->cl_bus_lock);

	dev_dbg(bus->dev, "rescan end");
}

void mei_cl_bus_rescan_work(struct work_struct *work)
{
	struct mei_device *bus =
		container_of(work, struct mei_device, bus_rescan_work);

	mei_cl_bus_rescan(bus);
}

int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
				struct module *owner)
{
	int err;

	cldrv->driver.name = cldrv->name;
	cldrv->driver.owner = owner;
	cldrv->driver.bus = &mei_cl_bus_type;

	err = driver_register(&cldrv->driver);
	if (err)
		return err;

	pr_debug("mei: driver [%s] registered\n", cldrv->driver.name);

	return 0;
}
EXPORT_SYMBOL_GPL(__mei_cldev_driver_register);

void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv)
{
	driver_unregister(&cldrv->driver);

	pr_debug("mei: driver [%s] unregistered\n", cldrv->driver.name);
}
EXPORT_SYMBOL_GPL(mei_cldev_driver_unregister);
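
/*
 * Usage sketch (illustrative): a client driver ties the pieces together
 * with an id table and module_mei_cl_driver(); the all-zero UUID below
 * is a placeholder, not a real ME client.
 *
 *	#define EXAMPLE_UUID UUID_LE(0x00000000, 0x0000, 0x0000, 0x00, \
 *				     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
 *
 *	static const struct mei_cl_device_id example_id_table[] = {
 *		{ .uuid = EXAMPLE_UUID, .version = MEI_CL_VERSION_ANY },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(mei, example_id_table);
 *
 *	static struct mei_cl_driver example_driver = {
 *		.id_table = example_id_table,
 *		.name = "example",
 *		.probe = example_probe,
 *		.remove = example_remove,
 *	};
 *	module_mei_cl_driver(example_driver);
 */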


int __init mei_cl_bus_init(void)
{
	return bus_register(&mei_cl_bus_type);
}

void __exit mei_cl_bus_exit(void)
{
	bus_unregister(&mei_cl_bus_type);
}