// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/mei_cl_bus.h>

#include "mei_dev.h"
#include "client.h"

#define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver)

/**
 * __mei_cl_send - internal client send (write)
 *
 * @cl: host client
 * @buf: buffer to send
 * @length: buffer length
 * @vtag: virtual tag
 * @mode: sending mode
 *
 * Return: written size in bytes or < 0 on error
 */
ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
		      unsigned int mode)
{
	struct mei_device *bus;
	struct mei_cl_cb *cb;
	ssize_t rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	bus = cl->dev;

	mutex_lock(&bus->device_lock);
	if (bus->dev_state != MEI_DEV_ENABLED &&
	    bus->dev_state != MEI_DEV_POWERING_DOWN) {
		rets = -ENODEV;
		goto out;
	}

	if (!mei_cl_is_connected(cl)) {
		rets = -ENODEV;
		goto out;
	}

	/* Check if we have an ME client device */
	if (!mei_me_cl_is_active(cl->me_cl)) {
		rets = -ENOTTY;
		goto out;
	}

	if (vtag) {
		/* Check if vtag is supported by client */
		rets = mei_cl_vt_support_check(cl);
		if (rets)
			goto out;
	}

	if (length > mei_cl_mtu(cl)) {
		rets = -EFBIG;
		goto out;
	}

	while (cl->tx_cb_queued >= bus->tx_queue_limit) {
		mutex_unlock(&bus->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE ||
				(!mei_cl_is_connected(cl)));
		mutex_lock(&bus->device_lock);
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto out;
		}
		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}

	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}
	cb->vtag = vtag;

	cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL);
	cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING);
	memcpy(cb->buf.data, buf, length);

	rets = mei_cl_write(cl, cb);

out:
	mutex_unlock(&bus->device_lock);

	return rets;
}

/**
 * __mei_cl_recv - internal client receive (read)
 *
 * @cl: host client
 * @buf: buffer to receive
 * @length: buffer length
 * @mode: io mode
 * @vtag: virtual tag
 * @timeout: recv timeout, 0 for infinite timeout
 *
 * Return: read size in bytes or < 0 on error
 */
ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag,
		      unsigned int mode, unsigned long timeout)
{
	struct mei_device *bus;
	struct mei_cl_cb *cb;
	size_t r_length;
	ssize_t rets;
	bool nonblock = !!(mode & MEI_CL_IO_RX_NONBLOCK);

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	bus = cl->dev;

	mutex_lock(&bus->device_lock);
	if (bus->dev_state != MEI_DEV_ENABLED &&
	    bus->dev_state != MEI_DEV_POWERING_DOWN) {
		rets = -ENODEV;
		goto out;
	}

	cb = mei_cl_read_cb(cl, NULL);
	if (cb)
		goto copy;

	rets = mei_cl_read_start(cl, length, NULL);
	if (rets && rets != -EBUSY)
		goto out;

	if (nonblock) {
		rets = -EAGAIN;
		goto out;
	}

	/* wait on event only if there is no other waiter */
	/* synchronized under device mutex */
	if (!waitqueue_active(&cl->rx_wait)) {

		mutex_unlock(&bus->device_lock);

		if (timeout) {
			rets = wait_event_interruptible_timeout
					(cl->rx_wait,
					mei_cl_read_cb(cl, NULL) ||
					(!mei_cl_is_connected(cl)),
					msecs_to_jiffies(timeout));
			if (rets == 0)
				return -ETIME;
			if (rets < 0) {
				if (signal_pending(current))
					return -EINTR;
				return -ERESTARTSYS;
			}
		} else {
			if (wait_event_interruptible
					(cl->rx_wait,
					mei_cl_read_cb(cl, NULL) ||
					(!mei_cl_is_connected(cl)))) {
				if (signal_pending(current))
					return -EINTR;
				return -ERESTARTSYS;
			}
		}

		mutex_lock(&bus->device_lock);

		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}

	cb = mei_cl_read_cb(cl, NULL);
	if (!cb) {
		rets = 0;
		goto out;
	}

copy:
	if (cb->status) {
		rets = cb->status;
		goto free;
	}

	r_length = min_t(size_t, length, cb->buf_idx);
	memcpy(buf, cb->buf.data, r_length);
	rets = r_length;
	if (vtag)
		*vtag = cb->vtag;

free:
	mei_cl_del_rd_completed(cl, cb);
out:
	mutex_unlock(&bus->device_lock);

	return rets;
}

/**
 * mei_cldev_send_vtag - me device send with vtag (write)
 *
 * @cldev: me client device
 * @buf: buffer to send
 * @length: buffer length
 * @vtag: virtual tag
 *
 * Return:
 *  * written size in bytes
 *  * < 0 on error
 */

ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
			    size_t length, u8 vtag)
{
	struct mei_cl *cl = cldev->cl;

	return __mei_cl_send(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING);
}
EXPORT_SYMBOL_GPL(mei_cldev_send_vtag);

/**
 * mei_cldev_recv_vtag - client receive with vtag (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 * @vtag: virtual tag
 *
 * Return:
 * * read size in bytes
 * * < 0 on error
 */

ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
			    u8 *vtag)
{
	struct mei_cl *cl = cldev->cl;

	return __mei_cl_recv(cl, buf, length, vtag, 0, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag);

/**
 * mei_cldev_recv_nonblock_vtag - non block client receive with vtag (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 * @vtag: virtual tag
 *
 * Return:
 * * read size in bytes
 * * -EAGAIN if function will block.
 * * < 0 on other error
 */
ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
				     size_t length, u8 *vtag)
{
	struct mei_cl *cl = cldev->cl;

	return __mei_cl_recv(cl, buf, length, vtag, MEI_CL_IO_RX_NONBLOCK, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock_vtag);
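
/*
 * Illustrative usage sketch (not part of the driver): a request/response
 * exchange with a virtual tag from a bus client driver. The command
 * bytes, reply size and vtag value are hypothetical.
 *
 *	static int my_cl_xfer_vtag(struct mei_cl_device *cldev)
 *	{
 *		u8 cmd[] = { 0x01, 0x00 };
 *		u8 reply[64];
 *		u8 vtag = 1;
 *		ssize_t ret;
 *
 *		ret = mei_cldev_send_vtag(cldev, cmd, sizeof(cmd), vtag);
 *		if (ret < 0)
 *			return ret;
 *
 *		ret = mei_cldev_recv_vtag(cldev, reply, sizeof(reply), &vtag);
 *		if (ret < 0)
 *			return ret;
 *
 *		return 0;
 *	}
 */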

/**
 * mei_cldev_send - me device send (write)
 *
 * @cldev: me client device
 * @buf: buffer to send
 * @length: buffer length
 *
 * Return:
 *  * written size in bytes
 *  * < 0 on error
 */
ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, size_t length)
{
	return mei_cldev_send_vtag(cldev, buf, length, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_send);

/**
 * mei_cldev_recv - client receive (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 *
 * Return: read size in bytes or < 0 on error
 */
ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
{
	return mei_cldev_recv_vtag(cldev, buf, length, NULL);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv);
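
/*
 * Illustrative usage sketch (not part of the driver): a simple blocking
 * request/response helper built on mei_cldev_send()/mei_cldev_recv().
 * The helper name and its parameters are hypothetical.
 *
 *	static ssize_t my_cl_req(struct mei_cl_device *cldev,
 *				 const u8 *req, size_t req_len,
 *				 u8 *rsp, size_t rsp_len)
 *	{
 *		ssize_t ret;
 *
 *		ret = mei_cldev_send(cldev, req, req_len);
 *		if (ret < 0)
 *			return ret;
 *
 *		return mei_cldev_recv(cldev, rsp, rsp_len);
 *	}
 */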

/**
 * mei_cldev_recv_nonblock - non block client receive (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 *
 * Return: read size in bytes or < 0 on error
 *         -EAGAIN if function will block.
 */
ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
				size_t length)
{
	return mei_cldev_recv_nonblock_vtag(cldev, buf, length, NULL);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock);
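
/*
 * Illustrative usage sketch (not part of the driver): polling with the
 * non-blocking receive. -EAGAIN means no data is queued yet; buf and
 * process() are hypothetical.
 *
 *	ret = mei_cldev_recv_nonblock(cldev, buf, sizeof(buf));
 *	if (ret == -EAGAIN)
 *		return 0;
 *	if (ret < 0)
 *		return ret;
 *	process(buf, ret);
 */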

/**
 * mei_cl_bus_rx_work - dispatch rx event for a bus device
 *
 * @work: work
 */
static void mei_cl_bus_rx_work(struct work_struct *work)
{
	struct mei_cl_device *cldev;
	struct mei_device *bus;

	cldev = container_of(work, struct mei_cl_device, rx_work);

	bus = cldev->bus;

	if (cldev->rx_cb)
		cldev->rx_cb(cldev);

	mutex_lock(&bus->device_lock);
	if (mei_cl_is_connected(cldev->cl))
		mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
	mutex_unlock(&bus->device_lock);
}

/**
 * mei_cl_bus_notif_work - dispatch FW notif event for a bus device
 *
 * @work: work
 */
static void mei_cl_bus_notif_work(struct work_struct *work)
{
	struct mei_cl_device *cldev;

	cldev = container_of(work, struct mei_cl_device, notif_work);

	if (cldev->notif_cb)
		cldev->notif_cb(cldev);
}

/**
 * mei_cl_bus_notify_event - schedule notify cb on bus client
 *
 * @cl: host client
 *
 * Return: true if event was scheduled
 *         false if the client is not waiting for event
 */
bool mei_cl_bus_notify_event(struct mei_cl *cl)
{
	struct mei_cl_device *cldev = cl->cldev;

	if (!cldev || !cldev->notif_cb)
		return false;

	if (!cl->notify_ev)
		return false;

	schedule_work(&cldev->notif_work);

	cl->notify_ev = false;

	return true;
}

/**
 * mei_cl_bus_rx_event - schedule rx event
 *
 * @cl: host client
 *
 * Return: true if event was scheduled
 *         false if the client is not waiting for event
 */
bool mei_cl_bus_rx_event(struct mei_cl *cl)
{
	struct mei_cl_device *cldev = cl->cldev;

	if (!cldev || !cldev->rx_cb)
		return false;

	schedule_work(&cldev->rx_work);

	return true;
}

/**
 * mei_cldev_register_rx_cb - register Rx event callback
 *
 * @cldev: me client device
 * @rx_cb: callback function
 *
 * Return: 0 on success
 *         -EALREADY if a callback is already registered
 *         <0 on other errors
 */
int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb)
{
	struct mei_device *bus = cldev->bus;
	int ret;

	if (!rx_cb)
		return -EINVAL;
	if (cldev->rx_cb)
		return -EALREADY;

	cldev->rx_cb = rx_cb;
	INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work);

	mutex_lock(&bus->device_lock);
	if (mei_cl_is_connected(cldev->cl))
		ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
	else
		ret = -ENODEV;
	mutex_unlock(&bus->device_lock);
	if (ret && ret != -EBUSY) {
		cancel_work_sync(&cldev->rx_work);
		cldev->rx_cb = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mei_cldev_register_rx_cb);
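
/*
 * Illustrative usage sketch (not part of the driver): registering an Rx
 * callback from a client driver. The callback name and buffer size are
 * hypothetical; the callback runs from a workqueue, so a blocking
 * mei_cldev_recv() is allowed there.
 *
 *	static void my_cl_rx(struct mei_cl_device *cldev)
 *	{
 *		u8 buf[32];
 *		ssize_t len;
 *
 *		len = mei_cldev_recv(cldev, buf, sizeof(buf));
 *		if (len < 0)
 *			dev_err(&cldev->dev, "rx failed %zd\n", len);
 *	}
 *
 *	ret = mei_cldev_register_rx_cb(cldev, my_cl_rx);
 */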

/**
 * mei_cldev_register_notif_cb - register FW notification event callback
 *
 * @cldev: me client device
 * @notif_cb: callback function
 *
 * Return: 0 on success
 *         -EALREADY if a callback is already registered
 *         <0 on other errors
 */
int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
				mei_cldev_cb_t notif_cb)
{
	struct mei_device *bus = cldev->bus;
	int ret;

	if (!notif_cb)
		return -EINVAL;

	if (cldev->notif_cb)
		return -EALREADY;

	cldev->notif_cb = notif_cb;
	INIT_WORK(&cldev->notif_work, mei_cl_bus_notif_work);

	mutex_lock(&bus->device_lock);
	ret = mei_cl_notify_request(cldev->cl, NULL, 1);
	mutex_unlock(&bus->device_lock);
	if (ret) {
		cancel_work_sync(&cldev->notif_work);
		cldev->notif_cb = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mei_cldev_register_notif_cb);
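
/*
 * Illustrative usage sketch (not part of the driver): registering a FW
 * notification callback. The callback body is hypothetical; it is
 * invoked from a workqueue whenever the firmware client raises a
 * notification.
 *
 *	static void my_cl_notif(struct mei_cl_device *cldev)
 *	{
 *		dev_dbg(&cldev->dev, "fw notification received\n");
 *	}
 *
 *	ret = mei_cldev_register_notif_cb(cldev, my_cl_notif);
 */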

/**
 * mei_cldev_get_drvdata - driver data getter
 *
 * @cldev: mei client device
 *
 * Return: driver private data
 */
void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev)
{
	return dev_get_drvdata(&cldev->dev);
}
EXPORT_SYMBOL_GPL(mei_cldev_get_drvdata);

/**
 * mei_cldev_set_drvdata - driver data setter
 *
 * @cldev: mei client device
 * @data: data to store
 */
void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data)
{
	dev_set_drvdata(&cldev->dev, data);
}
EXPORT_SYMBOL_GPL(mei_cldev_set_drvdata);
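
/*
 * Illustrative usage sketch (not part of the driver): stashing driver
 * state in probe and retrieving it later, e.g. in an Rx callback.
 * struct my_cl_data is a hypothetical driver-private structure.
 *
 *	struct my_cl_data *priv;
 *
 *	priv = devm_kzalloc(&cldev->dev, sizeof(*priv), GFP_KERNEL);
 *	if (!priv)
 *		return -ENOMEM;
 *	mei_cldev_set_drvdata(cldev, priv);
 *
 *	priv = mei_cldev_get_drvdata(cldev);
 */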

/**
 * mei_cldev_uuid - return uuid of the underlying me client
 *
 * @cldev: mei client device
 *
 * Return: me client uuid
 */
const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev)
{
	return mei_me_cl_uuid(cldev->me_cl);
}
EXPORT_SYMBOL_GPL(mei_cldev_uuid);

/**
 * mei_cldev_ver - return protocol version of the underlying me client
 *
 * @cldev: mei client device
 *
 * Return: me client protocol version
 */
u8 mei_cldev_ver(const struct mei_cl_device *cldev)
{
	return mei_me_cl_ver(cldev->me_cl);
}
EXPORT_SYMBOL_GPL(mei_cldev_ver);

/**
 * mei_cldev_enabled - check whether the device is enabled
 *
 * @cldev: mei client device
 *
 * Return: true if me client is initialized and connected
 */
bool mei_cldev_enabled(const struct mei_cl_device *cldev)
{
	return mei_cl_is_connected(cldev->cl);
}
EXPORT_SYMBOL_GPL(mei_cldev_enabled);

/**
 * mei_cl_bus_module_get - acquire module of the underlying
 *    hw driver.
 *
 * @cldev: mei client device
 *
 * Return: true on success; false if the module was removed.
 */
static bool mei_cl_bus_module_get(struct mei_cl_device *cldev)
{
	return try_module_get(cldev->bus->dev->driver->owner);
}

/**
 * mei_cl_bus_module_put - release the underlying hw module.
 *
 * @cldev: mei client device
 */
static void mei_cl_bus_module_put(struct mei_cl_device *cldev)
{
	module_put(cldev->bus->dev->driver->owner);
}

/**
 * mei_cl_bus_vtag - get bus vtag entry wrapper
 *     The tag for bus client is always first.
 *
 * @cl: host client
 *
 * Return: bus vtag or NULL
 */
static inline struct mei_cl_vtag *mei_cl_bus_vtag(struct mei_cl *cl)
{
	return list_first_entry_or_null(&cl->vtag_map,
					struct mei_cl_vtag, list);
}

/**
 * mei_cl_bus_vtag_alloc - add bus client entry to vtag map
 *
 * @cldev: me client device
 *
 * Return:
 * * 0 on success
 * * -ENOMEM if memory allocation failed
 */
static int mei_cl_bus_vtag_alloc(struct mei_cl_device *cldev)
{
	struct mei_cl *cl = cldev->cl;
	struct mei_cl_vtag *cl_vtag;

	/*
	 * Bail out if the client does not support vtags
	 * or has already allocated one
	 */
	if (mei_cl_vt_support_check(cl) || mei_cl_bus_vtag(cl))
		return 0;

	cl_vtag = mei_cl_vtag_alloc(NULL, 0);
	if (IS_ERR(cl_vtag))
		return -ENOMEM;

	list_add_tail(&cl_vtag->list, &cl->vtag_map);

	return 0;
}

/**
 * mei_cl_bus_vtag_free - remove the bus entry from vtag map
 *
 * @cldev: me client device
 */
static void mei_cl_bus_vtag_free(struct mei_cl_device *cldev)
{
	struct mei_cl *cl = cldev->cl;
	struct mei_cl_vtag *cl_vtag;

	cl_vtag = mei_cl_bus_vtag(cl);
	if (!cl_vtag)
		return;

	list_del(&cl_vtag->list);
	kfree(cl_vtag);
}

/**
 * mei_cldev_dma_map - allocate and map a DMA buffer for the client
 *
 * @cldev: me client device
 * @buffer_id: id of the mapped buffer, must be non-zero
 * @size: buffer size, must be aligned to MEI_FW_PAGE_SIZE
 *
 * Return: virtual address of the mapped buffer or ERR_PTR on failure
 */
void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int ret;

	if (!cldev || !buffer_id || !size)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(size, MEI_FW_PAGE_SIZE)) {
		dev_err(&cldev->dev, "Map size should be aligned to %lu\n",
			MEI_FW_PAGE_SIZE);
		return ERR_PTR(-EINVAL);
	}

	cl = cldev->cl;
	bus = cldev->bus;

	mutex_lock(&bus->device_lock);
	if (cl->state == MEI_FILE_UNINITIALIZED) {
		ret = mei_cl_link(cl);
		if (ret)
			goto out;
		/* update pointers */
		cl->cldev = cldev;
	}

	ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size);
out:
	mutex_unlock(&bus->device_lock);
	if (ret)
		return ERR_PTR(ret);
	return cl->dma.vaddr;
}
EXPORT_SYMBOL_GPL(mei_cldev_dma_map);

/**
 * mei_cldev_dma_unmap - unmap and free the client DMA buffer
 *
 * @cldev: me client device
 *
 * Return: 0 on success and < 0 on error
 */
int mei_cldev_dma_unmap(struct mei_cl_device *cldev)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int ret;

	if (!cldev)
		return -EINVAL;

	cl = cldev->cl;
	bus = cldev->bus;

	mutex_lock(&bus->device_lock);
	ret = mei_cl_dma_unmap(cl, NULL);

	mei_cl_flush_queues(cl, NULL);
	mei_cl_unlink(cl);
	mutex_unlock(&bus->device_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(mei_cldev_dma_unmap);
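
/*
 * Illustrative usage sketch (not part of the driver): setting up and
 * tearing down a client DMA buffer. The buffer id (3) and the
 * single-page size are hypothetical, and the ordering relative to
 * mei_cldev_enable()/mei_cldev_disable() depends on the specific
 * firmware client; the size must be a multiple of MEI_FW_PAGE_SIZE.
 *
 *	void *vaddr;
 *
 *	vaddr = mei_cldev_dma_map(cldev, 3, MEI_FW_PAGE_SIZE);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	...
 *
 *	mei_cldev_dma_unmap(cldev);
 */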

/**
 * mei_cldev_enable - enable me client device
 *     create connection with me client
 *
 * @cldev: me client device
 *
 * Return: 0 on success and < 0 on error
 */
int mei_cldev_enable(struct mei_cl_device *cldev)
{
	struct mei_device *bus = cldev->bus;
	struct mei_cl *cl;
	int ret;

	cl = cldev->cl;

	mutex_lock(&bus->device_lock);
	if (cl->state == MEI_FILE_UNINITIALIZED) {
		ret = mei_cl_link(cl);
		if (ret)
			goto out;
		/* update pointers */
		cl->cldev = cldev;
	}

	if (mei_cl_is_connected(cl)) {
		ret = 0;
		goto out;
	}

	if (!mei_me_cl_is_active(cldev->me_cl)) {
		dev_err(&cldev->dev, "me client is not active\n");
		ret = -ENOTTY;
		goto out;
	}

	ret = mei_cl_bus_vtag_alloc(cldev);
	if (ret)
		goto out;

	ret = mei_cl_connect(cl, cldev->me_cl, NULL);
	if (ret < 0) {
		dev_err(&cldev->dev, "cannot connect\n");
		mei_cl_bus_vtag_free(cldev);
	}

out:
	mutex_unlock(&bus->device_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mei_cldev_enable);

/**
 * mei_cldev_unregister_callbacks - internal wrapper for unregistering
 *  callbacks.
 *
 * @cldev: client device
 */
static void mei_cldev_unregister_callbacks(struct mei_cl_device *cldev)
{
	if (cldev->rx_cb) {
		cancel_work_sync(&cldev->rx_work);
		cldev->rx_cb = NULL;
	}

	if (cldev->notif_cb) {
		cancel_work_sync(&cldev->notif_work);
		cldev->notif_cb = NULL;
	}
}

/**
 * mei_cldev_disable - disable me client device
 *     disconnect from the me client
 *
 * @cldev: me client device
 *
 * Return: 0 on success and < 0 on error
 */
int mei_cldev_disable(struct mei_cl_device *cldev)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int err;

	if (!cldev)
		return -ENODEV;

	cl = cldev->cl;

	bus = cldev->bus;

	mei_cldev_unregister_callbacks(cldev);

	mutex_lock(&bus->device_lock);

	mei_cl_bus_vtag_free(cldev);

	if (!mei_cl_is_connected(cl)) {
		dev_dbg(bus->dev, "Already disconnected\n");
		err = 0;
		goto out;
	}

	err = mei_cl_disconnect(cl);
	if (err < 0)
		dev_err(bus->dev, "Could not disconnect from the ME client\n");

out:
	/* Flush queues and remove any pending read unless we have mapped DMA */
	if (!cl->dma_mapped) {
		mei_cl_flush_queues(cl, NULL);
		mei_cl_unlink(cl);
	}

	mutex_unlock(&bus->device_lock);
	return err;
}
EXPORT_SYMBOL_GPL(mei_cldev_disable);
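
/*
 * Illustrative usage sketch (not part of the driver): typical
 * connect/disconnect pairing in a bus client driver. The probe/remove
 * callbacks and my_cl_rx() are hypothetical.
 *
 *	static int my_cl_probe(struct mei_cl_device *cldev,
 *			       const struct mei_cl_device_id *id)
 *	{
 *		int ret;
 *
 *		ret = mei_cldev_enable(cldev);
 *		if (ret < 0)
 *			return ret;
 *
 *		ret = mei_cldev_register_rx_cb(cldev, my_cl_rx);
 *		if (ret) {
 *			mei_cldev_disable(cldev);
 *			return ret;
 *		}
 *
 *		return 0;
 *	}
 *
 *	static void my_cl_remove(struct mei_cl_device *cldev)
 *	{
 *		mei_cldev_disable(cldev);
 *	}
 */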

/**
 * mei_cl_device_find - find matching entry in the driver id table
 *
 * @cldev: me client device
 * @cldrv: me client driver
 *
 * Return: id on success; NULL if no id is matching
 */
static const
struct mei_cl_device_id *mei_cl_device_find(const struct mei_cl_device *cldev,
					    const struct mei_cl_driver *cldrv)
{
	const struct mei_cl_device_id *id;
	const uuid_le *uuid;
	u8 version;
	bool match;

	uuid = mei_me_cl_uuid(cldev->me_cl);
	version = mei_me_cl_ver(cldev->me_cl);

	id = cldrv->id_table;
	while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
		if (!uuid_le_cmp(*uuid, id->uuid)) {
			match = true;

			if (cldev->name[0])
				if (strncmp(cldev->name, id->name,
					    sizeof(id->name)))
					match = false;

			if (id->version != MEI_CL_VERSION_ANY)
				if (id->version != version)
					match = false;
			if (match)
				return id;
		}

		id++;
	}

	return NULL;
}

/**
 * mei_cl_device_match - device match function
 *
 * @dev: device
 * @drv: driver
 *
 * Return: 1 if matching device was found, 0 otherwise
 */
static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
{
	const struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const struct mei_cl_driver *cldrv = to_mei_cl_driver(drv);
	const struct mei_cl_device_id *found_id;

	if (!cldev)
		return 0;

	if (!cldev->do_match)
		return 0;

	if (!cldrv || !cldrv->id_table)
		return 0;

	found_id = mei_cl_device_find(cldev, cldrv);
	if (found_id)
		return 1;

	return 0;
}

/**
 * mei_cl_device_probe - bus probe function
 *
 * @dev: device
 *
 * Return: 0 on success; < 0 otherwise
 */
static int mei_cl_device_probe(struct device *dev)
{
	struct mei_cl_device *cldev;
	struct mei_cl_driver *cldrv;
	const struct mei_cl_device_id *id;
	int ret;

	cldev = to_mei_cl_device(dev);
	cldrv = to_mei_cl_driver(dev->driver);

	if (!cldev)
		return 0;

	if (!cldrv || !cldrv->probe)
		return -ENODEV;

	id = mei_cl_device_find(cldev, cldrv);
	if (!id)
		return -ENODEV;

	if (!mei_cl_bus_module_get(cldev)) {
		dev_err(&cldev->dev, "get hw module failed");
		return -ENODEV;
	}

	ret = cldrv->probe(cldev, id);
	if (ret) {
		mei_cl_bus_module_put(cldev);
		return ret;
	}

	__module_get(THIS_MODULE);
	return 0;
}

/**
 * mei_cl_device_remove - remove device from the bus
 *
 * @dev: device
 */
static void mei_cl_device_remove(struct device *dev)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	struct mei_cl_driver *cldrv = to_mei_cl_driver(dev->driver);

	if (cldrv->remove)
		cldrv->remove(cldev);

	mei_cldev_unregister_callbacks(cldev);

	mei_cl_bus_module_put(cldev);
	module_put(THIS_MODULE);
}

static ssize_t name_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);

	return scnprintf(buf, PAGE_SIZE, "%s", cldev->name);
}
static DEVICE_ATTR_RO(name);

static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);

	return sprintf(buf, "%pUl", uuid);
}
static DEVICE_ATTR_RO(uuid);

static ssize_t version_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	u8 version = mei_me_cl_ver(cldev->me_cl);

	return sprintf(buf, "%02X", version);
}
static DEVICE_ATTR_RO(version);

static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
	u8 version = mei_me_cl_ver(cldev->me_cl);

	return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:",
			 cldev->name, uuid, version);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t max_conn_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	u8 maxconn = mei_me_cl_max_conn(cldev->me_cl);

	return sprintf(buf, "%d", maxconn);
}
static DEVICE_ATTR_RO(max_conn);

static ssize_t fixed_show(struct device *dev, struct device_attribute *a,
			  char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	u8 fixed = mei_me_cl_fixed(cldev->me_cl);

	return sprintf(buf, "%d", fixed);
}
static DEVICE_ATTR_RO(fixed);

static ssize_t vtag_show(struct device *dev, struct device_attribute *a,
			 char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	bool vt = mei_me_cl_vt(cldev->me_cl);

	return sprintf(buf, "%d", vt);
}
static DEVICE_ATTR_RO(vtag);

static ssize_t max_len_show(struct device *dev, struct device_attribute *a,
			    char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	u32 maxlen = mei_me_cl_max_len(cldev->me_cl);

	return sprintf(buf, "%u", maxlen);
}
static DEVICE_ATTR_RO(max_len);

static struct attribute *mei_cldev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_uuid.attr,
	&dev_attr_version.attr,
	&dev_attr_modalias.attr,
	&dev_attr_max_conn.attr,
	&dev_attr_fixed.attr,
	&dev_attr_vtag.attr,
	&dev_attr_max_len.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mei_cldev);

/**
 * mei_cl_device_uevent - me client bus uevent handler
 *
 * @dev: device
 * @env: uevent kobject
 *
 * Return: 0 on success, -ENOMEM when add_uevent_var fails
 */
static int mei_cl_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
	u8 version = mei_me_cl_ver(cldev->me_cl);

	if (add_uevent_var(env, "MEI_CL_VERSION=%d", version))
		return -ENOMEM;

	if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
		return -ENOMEM;

	if (add_uevent_var(env, "MEI_CL_NAME=%s", cldev->name))
		return -ENOMEM;

	if (add_uevent_var(env, "MODALIAS=mei:%s:%pUl:%02X:",
			   cldev->name, uuid, version))
		return -ENOMEM;

	return 0;
}

static struct bus_type mei_cl_bus_type = {
	.name		= "mei",
	.dev_groups	= mei_cldev_groups,
	.match		= mei_cl_device_match,
	.probe		= mei_cl_device_probe,
	.remove		= mei_cl_device_remove,
	.uevent		= mei_cl_device_uevent,
};

static struct mei_device *mei_dev_bus_get(struct mei_device *bus)
{
	if (bus)
		get_device(bus->dev);

	return bus;
}

static void mei_dev_bus_put(struct mei_device *bus)
{
	if (bus)
		put_device(bus->dev);
}

static void mei_cl_bus_dev_release(struct device *dev)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);

	if (!cldev)
		return;

	mei_cl_flush_queues(cldev->cl, NULL);
	mei_me_cl_put(cldev->me_cl);
	mei_dev_bus_put(cldev->bus);
	mei_cl_unlink(cldev->cl);
	kfree(cldev->cl);
	kfree(cldev);
}

static const struct device_type mei_cl_device_type = {
	.release = mei_cl_bus_dev_release,
};

/**
 * mei_cl_bus_set_name - set device name for me client device
 *  <controller>-<client device>
 *  Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb
 *
 * @cldev: me client device
 */
static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
{
	dev_set_name(&cldev->dev, "%s-%pUl",
		     dev_name(cldev->bus->dev),
		     mei_me_cl_uuid(cldev->me_cl));
}

/**
 * mei_cl_bus_dev_alloc - initialize and allocate mei client device
 *
 * @bus: mei device
 * @me_cl: me client
 *
 * Return: allocated device structure or NULL on allocation failure
 */
static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
						  struct mei_me_client *me_cl)
{
	struct mei_cl_device *cldev;
	struct mei_cl *cl;

	cldev = kzalloc(sizeof(*cldev), GFP_KERNEL);
	if (!cldev)
		return NULL;

	cl = mei_cl_allocate(bus);
	if (!cl) {
		kfree(cldev);
		return NULL;
	}

	device_initialize(&cldev->dev);
	cldev->dev.parent = bus->dev;
	cldev->dev.bus    = &mei_cl_bus_type;
	cldev->dev.type   = &mei_cl_device_type;
	cldev->bus        = mei_dev_bus_get(bus);
	cldev->me_cl      = mei_me_cl_get(me_cl);
	cldev->cl         = cl;
	mei_cl_bus_set_name(cldev);
	cldev->is_added   = 0;
	INIT_LIST_HEAD(&cldev->bus_list);

	return cldev;
}

/**
 * mei_cl_bus_dev_setup - setup me client device
 *    run fix up routines and set the device name
 *
 * @bus: mei device
 * @cldev: me client device
 *
 * Return: true if the device is eligible for enumeration
 */
static bool mei_cl_bus_dev_setup(struct mei_device *bus,
				 struct mei_cl_device *cldev)
{
	cldev->do_match = 1;
	mei_cl_bus_dev_fixup(cldev);

	/* the device name can change during fix up */
	if (cldev->do_match)
		mei_cl_bus_set_name(cldev);

	return cldev->do_match == 1;
}

/**
 * mei_cl_bus_dev_add - add me client devices
 *
 * @cldev: me client device
 *
 * Return: 0 on success; < 0 on failure
 */
static int mei_cl_bus_dev_add(struct mei_cl_device *cldev)
{
	int ret;

	dev_dbg(cldev->bus->dev, "adding %pUL:%02X\n",
		mei_me_cl_uuid(cldev->me_cl),
		mei_me_cl_ver(cldev->me_cl));
	ret = device_add(&cldev->dev);
	if (!ret)
		cldev->is_added = 1;

	return ret;
}

/**
 * mei_cl_bus_dev_stop - stop the driver
 *
 * @cldev: me client device
 */
static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev)
{
	if (cldev->is_added)
		device_release_driver(&cldev->dev);
}

/**
 * mei_cl_bus_dev_destroy - destroy me client device object
 *
 * @cldev: me client device
 *
 * Locking: called under "dev->cl_bus_lock" lock
 */
static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev)
{

	WARN_ON(!mutex_is_locked(&cldev->bus->cl_bus_lock));

	if (!cldev->is_added)
		return;

	device_del(&cldev->dev);

	list_del_init(&cldev->bus_list);

	cldev->is_added = 0;
	put_device(&cldev->dev);
}

/**
 * mei_cl_bus_remove_device - remove a device from the bus
 *
 * @cldev: me client device
 */
static void mei_cl_bus_remove_device(struct mei_cl_device *cldev)
{
	mei_cl_bus_dev_stop(cldev);
	mei_cl_bus_dev_destroy(cldev);
}

/**
 * mei_cl_bus_remove_devices - remove all devices from the bus
 *
 * @bus: mei device
 */
void mei_cl_bus_remove_devices(struct mei_device *bus)
{
	struct mei_cl_device *cldev, *next;

	mutex_lock(&bus->cl_bus_lock);
	list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list)
		mei_cl_bus_remove_device(cldev);
	mutex_unlock(&bus->cl_bus_lock);
}


/**
 * mei_cl_bus_dev_init - allocate and initialize a mei client device
 *     based on the me client
 *
 * @bus: mei device
 * @me_cl: me client
 *
 * Locking: called under "dev->cl_bus_lock" lock
 */
static void mei_cl_bus_dev_init(struct mei_device *bus,
				struct mei_me_client *me_cl)
{
	struct mei_cl_device *cldev;

	WARN_ON(!mutex_is_locked(&bus->cl_bus_lock));

	dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));

	if (me_cl->bus_added)
		return;

	cldev = mei_cl_bus_dev_alloc(bus, me_cl);
	if (!cldev)
		return;

	me_cl->bus_added = true;
	list_add_tail(&cldev->bus_list, &bus->device_list);

}

/**
 * mei_cl_bus_rescan - scan me clients list and create
 *    devices for eligible clients
 *
 * @bus: mei device
 */
static void mei_cl_bus_rescan(struct mei_device *bus)
{
	struct mei_cl_device *cldev, *n;
	struct mei_me_client *me_cl;

	mutex_lock(&bus->cl_bus_lock);

	down_read(&bus->me_clients_rwsem);
	list_for_each_entry(me_cl, &bus->me_clients, list)
		mei_cl_bus_dev_init(bus, me_cl);
	up_read(&bus->me_clients_rwsem);

	list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) {

		if (!mei_me_cl_is_active(cldev->me_cl)) {
			mei_cl_bus_remove_device(cldev);
			continue;
		}

		if (cldev->is_added)
			continue;

		if (mei_cl_bus_dev_setup(bus, cldev))
			mei_cl_bus_dev_add(cldev);
		else {
			list_del_init(&cldev->bus_list);
			put_device(&cldev->dev);
		}
	}
	mutex_unlock(&bus->cl_bus_lock);

	dev_dbg(bus->dev, "rescan end");
}

void mei_cl_bus_rescan_work(struct work_struct *work)
{
	struct mei_device *bus =
		container_of(work, struct mei_device, bus_rescan_work);

	mei_cl_bus_rescan(bus);
}

int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
				struct module *owner)
{
	int err;

	cldrv->driver.name = cldrv->name;
	cldrv->driver.owner = owner;
	cldrv->driver.bus = &mei_cl_bus_type;

	err = driver_register(&cldrv->driver);
	if (err)
		return err;

	pr_debug("mei: driver [%s] registered\n", cldrv->driver.name);

	return 0;
}
EXPORT_SYMBOL_GPL(__mei_cldev_driver_register);

void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv)
{
	driver_unregister(&cldrv->driver);

	pr_debug("mei: driver [%s] unregistered\n", cldrv->driver.name);
}
EXPORT_SYMBOL_GPL(mei_cldev_driver_unregister);
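
/*
 * Illustrative usage sketch (not part of the driver): registering a bus
 * client driver against a firmware client UUID. The UUID value, driver
 * name and callbacks are hypothetical; module_mei_cl_driver() is the
 * usual module_driver() style wrapper around
 * mei_cldev_driver_register()/mei_cldev_driver_unregister().
 *
 *	#define MY_CL_UUID UUID_LE(0x12345678, 0x9abc, 0xdef0, \
 *				   0x12, 0x34, 0x56, 0x78, \
 *				   0x9a, 0xbc, 0xde, 0xf0)
 *
 *	static const struct mei_cl_device_id my_cl_tbl[] = {
 *		{ .uuid = MY_CL_UUID, .version = MEI_CL_VERSION_ANY },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(mei, my_cl_tbl);
 *
 *	static struct mei_cl_driver my_cl_driver = {
 *		.id_table = my_cl_tbl,
 *		.name = "my_cl",
 *		.probe = my_cl_probe,
 *		.remove = my_cl_remove,
 *	};
 *	module_mei_cl_driver(my_cl_driver);
 */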


int __init mei_cl_bus_init(void)
{
	return bus_register(&mei_cl_bus_type);
}

void __exit mei_cl_bus_exit(void)
{
	bus_unregister(&mei_cl_bus_type);
}