// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio Transport driver for Arm System Control and Management Interface
 * (SCMI).
 *
 * Copyright (C) 2020-2022 OpenSynergy.
 * Copyright (C) 2021-2022 ARM Ltd.
 */

/**
 * DOC: Theory of Operation
 *
 * The scmi-virtio transport implements a driver for the virtio SCMI device.
 *
 * There is one Tx channel (virtio cmdq, A2P channel) and at most one Rx
 * channel (virtio eventq, P2A channel). Each channel is implemented through a
 * virtqueue. Access to each virtqueue is protected by spinlocks.
 */
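
/*
 * In outline (a summary of the implementation below, not a normative
 * description of the device):
 *
 * - Tx: a free scmi_vio_msg is taken from the channel free_list, the SCMI
 *   command is packed into its request buffer and queued on the cmdq together
 *   with the input buffer meant for the response; once the device returns the
 *   buffers, scmi_vio_complete_cb() delivers the reply via scmi_rx_callback()
 *   and the message goes back onto the free_list.
 *
 * - Rx: every scmi_vio_msg input buffer is pre-fed to the eventq; when a
 *   notification or delayed response arrives it is delivered via
 *   scmi_rx_callback() and the buffer is immediately re-fed to the eventq.
 */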

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

#include <uapi/linux/virtio_ids.h>
#include <uapi/linux/virtio_scmi.h>

#include "common.h"

#define VIRTIO_MAX_RX_TIMEOUT_MS 60000
#define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */
#define VIRTIO_SCMI_MAX_PDU_SIZE \
        (VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
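
/*
 * Each Tx message occupies two virtqueue descriptors: one device-readable
 * buffer carrying the request and one device-writable buffer receiving the
 * response (see virtio_send_message()).
 */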
#define DESCRIPTORS_PER_TX_MSG 2

/**
 * struct scmi_vio_channel - Transport channel information
 *
 * @vqueue: Associated virtqueue
 * @cinfo: SCMI Tx or Rx channel
 * @free_lock: Protects access to the @free_list.
 * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only
 * @deferred_tx_work: Worker for TX deferred replies processing
 * @deferred_tx_wq: Workqueue for TX deferred replies
 * @pending_lock: Protects access to the @pending_cmds_list.
 * @pending_cmds_list: List of pre-fetched commands queued for later processing
 * @is_rx: Whether channel is an Rx channel
 * @max_msg: Maximum number of pending messages for this channel.
 * @lock: Protects access to all members except users, free_list and
 *	  pending_cmds_list.
 * @shutdown_done: A reference to a completion used when freeing this channel.
 * @users: A reference count to currently active users of this channel.
 */
struct scmi_vio_channel {
        struct virtqueue *vqueue;
        struct scmi_chan_info *cinfo;
        /* lock to protect access to the free list. */
        spinlock_t free_lock;
        struct list_head free_list;
        /* lock to protect access to the pending list. */
        spinlock_t pending_lock;
        struct list_head pending_cmds_list;
        struct work_struct deferred_tx_work;
        struct workqueue_struct *deferred_tx_wq;
        bool is_rx;
        unsigned int max_msg;
        /*
         * Lock to protect access to all members except users, free_list and
         * pending_cmds_list
         */
        spinlock_t lock;
        struct completion *shutdown_done;
        refcount_t users;
};

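/*
 * Poll state machine for a message, as driven by the code below (a summary,
 * not normative): VIO_MSG_NOT_POLLED is the default set when a free message
 * is picked; VIO_MSG_POLLING is set in virtio_send_message() when the
 * transaction requested polling; VIO_MSG_POLL_DONE is set in
 * virtio_poll_done() once the message has been dequeued from the virtqueue;
 * VIO_MSG_POLL_TIMEOUT is set in virtio_mark_txdone() when a polled message
 * timed out while still in-flight.
 */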
enum poll_states {
        VIO_MSG_NOT_POLLED,
        VIO_MSG_POLL_TIMEOUT,
        VIO_MSG_POLLING,
        VIO_MSG_POLL_DONE,
};

/**
 * struct scmi_vio_msg - Transport PDU information
 *
 * @request: SDU used for commands
 * @input: SDU used for (delayed) responses and notifications
 * @list: List which scmi_vio_msg may be part of
 * @rx_len: Input SDU size in bytes, once input has been received
 * @poll_idx: Last used index registered for polling purposes if this message
 *	      transaction reply was configured for polling.
 * @poll_status: Polling state for this message.
 * @poll_lock: A lock to protect @poll_status
 * @users: A reference count to track this message's users and avoid premature
 *	   freeing (and reuse) when polling and IRQ execution paths interleave.
 */
struct scmi_vio_msg {
        struct scmi_msg_payld *request;
        struct scmi_msg_payld *input;
        struct list_head list;
        unsigned int rx_len;
        unsigned int poll_idx;
        enum poll_states poll_status;
        /* Lock to protect access to poll_status */
        spinlock_t poll_lock;
        refcount_t users;
};

/* Only one SCMI VirtIO device can possibly exist */
static struct virtio_device *scmi_vdev;

static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch,
                                   struct scmi_chan_info *cinfo)
{
        unsigned long flags;

        spin_lock_irqsave(&vioch->lock, flags);
        cinfo->transport_info = vioch;
        /* Indirectly mark the channel as no longer available */
        vioch->cinfo = cinfo;
        spin_unlock_irqrestore(&vioch->lock, flags);

        refcount_set(&vioch->users, 1);
}

static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch)
{
        return refcount_inc_not_zero(&vioch->users);
}

static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch)
{
        if (refcount_dec_and_test(&vioch->users)) {
                unsigned long flags;

                spin_lock_irqsave(&vioch->lock, flags);
                if (vioch->shutdown_done) {
                        vioch->cinfo = NULL;
                        complete(vioch->shutdown_done);
                }
                spin_unlock_irqrestore(&vioch->lock, flags);
        }
}

static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
{
        unsigned long flags;
        DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done);

        /*
         * Prepare to wait for the last release if not already released
         * or in progress.
         */
        spin_lock_irqsave(&vioch->lock, flags);
        if (!vioch->cinfo || vioch->shutdown_done) {
                spin_unlock_irqrestore(&vioch->lock, flags);
                return;
        }

        vioch->shutdown_done = &vioch_shutdown_done;
        if (!vioch->is_rx && vioch->deferred_tx_wq)
                /* Cannot be kicked anymore after this... */
                vioch->deferred_tx_wq = NULL;
        spin_unlock_irqrestore(&vioch->lock, flags);

        scmi_vio_channel_release(vioch);

        /* Let any possibly concurrent RX path release the channel */
        wait_for_completion(vioch->shutdown_done);
}

/* Assumes to be called with vio channel acquired already */
static struct scmi_vio_msg *
scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch)
{
        unsigned long flags;
        struct scmi_vio_msg *msg;

        spin_lock_irqsave(&vioch->free_lock, flags);
        if (list_empty(&vioch->free_list)) {
                spin_unlock_irqrestore(&vioch->free_lock, flags);
                return NULL;
        }

        msg = list_first_entry(&vioch->free_list, typeof(*msg), list);
        list_del_init(&msg->list);
        spin_unlock_irqrestore(&vioch->free_lock, flags);

        /* Still no users, no need to acquire poll_lock */
        msg->poll_status = VIO_MSG_NOT_POLLED;
        refcount_set(&msg->users, 1);

        return msg;
}

static inline bool scmi_vio_msg_acquire(struct scmi_vio_msg *msg)
{
        return refcount_inc_not_zero(&msg->users);
}

/* Assumes to be called with vio channel acquired already */
static inline bool scmi_vio_msg_release(struct scmi_vio_channel *vioch,
                                        struct scmi_vio_msg *msg)
{
        bool ret;

        ret = refcount_dec_and_test(&msg->users);
        if (ret) {
                unsigned long flags;

                spin_lock_irqsave(&vioch->free_lock, flags);
                list_add_tail(&msg->list, &vioch->free_list);
                spin_unlock_irqrestore(&vioch->free_lock, flags);
        }

        return ret;
}

static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
{
        return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS);
}

static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
                               struct scmi_vio_msg *msg)
{
        struct scatterlist sg_in;
        int rc;
        unsigned long flags;
        struct device *dev = &vioch->vqueue->vdev->dev;

        sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE);

        spin_lock_irqsave(&vioch->lock, flags);

        rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
        if (rc)
                dev_err(dev, "failed to add to RX virtqueue (%d)\n", rc);
        else
                virtqueue_kick(vioch->vqueue);

        spin_unlock_irqrestore(&vioch->lock, flags);

        return rc;
}

/*
 * Assumes to be called with channel already acquired or not ready at all;
 * vioch->lock MUST NOT have been already acquired.
 */
static void scmi_finalize_message(struct scmi_vio_channel *vioch,
                                  struct scmi_vio_msg *msg)
{
        if (vioch->is_rx)
                scmi_vio_feed_vq_rx(vioch, msg);
        else
                scmi_vio_msg_release(vioch, msg);
}

static void scmi_vio_complete_cb(struct virtqueue *vqueue)
{
        unsigned long flags;
        unsigned int length;
        struct scmi_vio_channel *vioch;
        struct scmi_vio_msg *msg;
        bool cb_enabled = true;

        if (WARN_ON_ONCE(!vqueue->vdev->priv))
                return;
        vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index];

        for (;;) {
                if (!scmi_vio_channel_acquire(vioch))
                        return;

                spin_lock_irqsave(&vioch->lock, flags);
                if (cb_enabled) {
                        virtqueue_disable_cb(vqueue);
                        cb_enabled = false;
                }

                msg = virtqueue_get_buf(vqueue, &length);
                if (!msg) {
                        if (virtqueue_enable_cb(vqueue)) {
                                spin_unlock_irqrestore(&vioch->lock, flags);
                                scmi_vio_channel_release(vioch);
                                return;
                        }
                        cb_enabled = true;
                }
                spin_unlock_irqrestore(&vioch->lock, flags);

                if (msg) {
                        msg->rx_len = length;
                        scmi_rx_callback(vioch->cinfo,
                                         msg_read_header(msg->input), msg);

                        scmi_finalize_message(vioch, msg);
                }

                /*
                 * Release vio channel between loop iterations to allow
                 * virtio_chan_free() to eventually fully release it when
                 * shutting down; in such a case, any outstanding message will
                 * be ignored since this loop will bail out at the next
                 * iteration.
                 */
                scmi_vio_channel_release(vioch);
        }
}

static void scmi_vio_deferred_tx_worker(struct work_struct *work)
{
        unsigned long flags;
        struct scmi_vio_channel *vioch;
        struct scmi_vio_msg *msg, *tmp;

        vioch = container_of(work, struct scmi_vio_channel, deferred_tx_work);

        if (!scmi_vio_channel_acquire(vioch))
                return;

        /*
         * Process pre-fetched messages: these could be non-polled messages or
         * late timed-out replies to polled messages dequeued by chance while
         * polling for some other messages: this worker is in charge of
         * processing the valid non-expired messages and, in any case, of
         * finally freeing all of them.
         */
        spin_lock_irqsave(&vioch->pending_lock, flags);

        /* Scan the list of possibly pre-fetched messages during polling. */
        list_for_each_entry_safe(msg, tmp, &vioch->pending_cmds_list, list) {
                list_del(&msg->list);

                /*
                 * Channel is acquired here (cannot vanish) and this message
                 * is no longer processed elsewhere so no poll_lock needed.
                 */
                if (msg->poll_status == VIO_MSG_NOT_POLLED)
                        scmi_rx_callback(vioch->cinfo,
                                         msg_read_header(msg->input), msg);

                /* Free the processed message once done */
                scmi_vio_msg_release(vioch, msg);
        }

        spin_unlock_irqrestore(&vioch->pending_lock, flags);

        /* Process possibly still pending messages */
        scmi_vio_complete_cb(vioch->vqueue);

        scmi_vio_channel_release(vioch);
}

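/*
 * Virtqueue names and callbacks, in device order: index 0 is the cmdq used as
 * the Tx/"tx" channel and, when VIRTIO_SCMI_F_P2A_CHANNELS is offered, index 1
 * is the eventq used as the Rx/"rx" channel; the same ordering is assumed
 * below via VIRTIO_SCMI_VQ_TX/VIRTIO_SCMI_VQ_RX.
 */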
static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" };

static vq_callback_t *scmi_vio_complete_callbacks[] = {
        scmi_vio_complete_cb,
        scmi_vio_complete_cb
};

static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo)
{
        struct scmi_vio_channel *vioch = base_cinfo->transport_info;

        return vioch->max_msg;
}

static int virtio_link_supplier(struct device *dev)
{
        if (!scmi_vdev) {
                dev_notice(dev,
                           "Deferring probe after not finding a bound scmi-virtio device\n");
                return -EPROBE_DEFER;
        }

        if (!device_link_add(dev, &scmi_vdev->dev,
                             DL_FLAG_AUTOREMOVE_CONSUMER)) {
                dev_err(dev, "Adding link to supplier virtio device failed\n");
                return -ECANCELED;
        }

        return 0;
}

static bool virtio_chan_available(struct device *dev, int idx)
{
        struct scmi_vio_channel *channels, *vioch = NULL;

        if (WARN_ON_ONCE(!scmi_vdev))
                return false;

        channels = (struct scmi_vio_channel *)scmi_vdev->priv;

        switch (idx) {
        case VIRTIO_SCMI_VQ_TX:
                vioch = &channels[VIRTIO_SCMI_VQ_TX];
                break;
        case VIRTIO_SCMI_VQ_RX:
                if (scmi_vio_have_vq_rx(scmi_vdev))
                        vioch = &channels[VIRTIO_SCMI_VQ_RX];
                break;
        default:
                return false;
        }

        return vioch && !vioch->cinfo;
}

static void scmi_destroy_tx_workqueue(void *deferred_tx_wq)
{
        destroy_workqueue(deferred_tx_wq);
}

static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
                             bool tx)
{
        struct scmi_vio_channel *vioch;
        int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX;
        int i;

        if (!scmi_vdev)
                return -EPROBE_DEFER;

        vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index];

        /* Setup a deferred worker for polling. */
        if (tx && !vioch->deferred_tx_wq) {
                int ret;

                vioch->deferred_tx_wq =
                        alloc_workqueue(dev_name(&scmi_vdev->dev),
                                        WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
                                        0);
                if (!vioch->deferred_tx_wq)
                        return -ENOMEM;

                ret = devm_add_action_or_reset(dev, scmi_destroy_tx_workqueue,
                                               vioch->deferred_tx_wq);
                if (ret)
                        return ret;

                INIT_WORK(&vioch->deferred_tx_work,
                          scmi_vio_deferred_tx_worker);
        }

        for (i = 0; i < vioch->max_msg; i++) {
                struct scmi_vio_msg *msg;

                msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL);
                if (!msg)
                        return -ENOMEM;

                if (tx) {
                        msg->request = devm_kzalloc(dev,
                                                    VIRTIO_SCMI_MAX_PDU_SIZE,
                                                    GFP_KERNEL);
                        if (!msg->request)
                                return -ENOMEM;
                        spin_lock_init(&msg->poll_lock);
                        refcount_set(&msg->users, 1);
                }

                msg->input = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE,
                                          GFP_KERNEL);
                if (!msg->input)
                        return -ENOMEM;

                scmi_finalize_message(vioch, msg);
        }

        scmi_vio_channel_ready(vioch, cinfo);

        return 0;
}

static int virtio_chan_free(int id, void *p, void *data)
{
        struct scmi_chan_info *cinfo = p;
        struct scmi_vio_channel *vioch = cinfo->transport_info;

        /*
         * Break device to inhibit further traffic flowing while shutting down
         * the channels: doing it later holding vioch->lock creates unsafe
         * locking dependency chains as reported by LOCKDEP.
         */
        virtio_break_device(vioch->vqueue->vdev);
        scmi_vio_channel_cleanup_sync(vioch);

        scmi_free_channel(cinfo, data, id);

        return 0;
}

static int virtio_send_message(struct scmi_chan_info *cinfo,
                               struct scmi_xfer *xfer)
{
        struct scmi_vio_channel *vioch = cinfo->transport_info;
        struct scatterlist sg_out;
        struct scatterlist sg_in;
        struct scatterlist *sgs[DESCRIPTORS_PER_TX_MSG] = { &sg_out, &sg_in };
        unsigned long flags;
        int rc;
        struct scmi_vio_msg *msg;

        if (!scmi_vio_channel_acquire(vioch))
                return -EINVAL;

        msg = scmi_virtio_get_free_msg(vioch);
        if (!msg) {
                scmi_vio_channel_release(vioch);
                return -EBUSY;
        }

        msg_tx_prepare(msg->request, xfer);

        sg_init_one(&sg_out, msg->request, msg_command_size(xfer));
        sg_init_one(&sg_in, msg->input, msg_response_size(xfer));

        spin_lock_irqsave(&vioch->lock, flags);

        /*
         * If polling was requested for this transaction:
         *  - retrieve last used index (will be used as polling reference)
         *  - bind the polled message to the xfer via .priv
         *  - grab an additional msg refcount for the poll-path
         */
        if (xfer->hdr.poll_completion) {
                msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
                /* Still no users, no need to acquire poll_lock */
                msg->poll_status = VIO_MSG_POLLING;
                scmi_vio_msg_acquire(msg);
                /* Ensure initialized msg is visibly bound to xfer */
                smp_store_mb(xfer->priv, msg);
        }

        rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC);
        if (rc)
                dev_err(vioch->cinfo->dev,
                        "failed to add to TX virtqueue (%d)\n", rc);
        else
                virtqueue_kick(vioch->vqueue);

        spin_unlock_irqrestore(&vioch->lock, flags);

        if (rc) {
                /* Ensure order between xfer->priv clear and vq feeding */
                smp_store_mb(xfer->priv, NULL);
                if (xfer->hdr.poll_completion)
                        scmi_vio_msg_release(vioch, msg);
                scmi_vio_msg_release(vioch, msg);
        }

        scmi_vio_channel_release(vioch);

        return rc;
}

static void virtio_fetch_response(struct scmi_chan_info *cinfo,
                                  struct scmi_xfer *xfer)
{
        struct scmi_vio_msg *msg = xfer->priv;

        if (msg)
                msg_fetch_response(msg->input, msg->rx_len, xfer);
}

static void virtio_fetch_notification(struct scmi_chan_info *cinfo,
                                      size_t max_len, struct scmi_xfer *xfer)
{
        struct scmi_vio_msg *msg = xfer->priv;

        if (msg)
                msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer);
}

/**
 * virtio_mark_txdone - Mark transmission done
 *
 * Free only completed polling transfer messages.
 *
 * Note that in the SCMI VirtIO transport we never explicitly release still
 * outstanding but timed-out messages by forcibly re-adding them to the
 * free-list inside the TX code path; we instead let IRQ/RX callbacks, or the
 * TX deferred worker, eventually clean up such messages once, finally, a late
 * reply is received and discarded (if ever).
 *
 * This approach was deemed preferable since those pending timed-out buffers
 * are still effectively owned by the SCMI platform VirtIO device even after
 * timeout expiration: forcibly freeing and reusing them before they have been
 * returned explicitly by the SCMI platform could lead to subtle bugs due to
 * message corruption.
 * An SCMI platform VirtIO device which never returns message buffers is
 * anyway broken and it will quickly lead to exhaustion of available messages.
 *
 * For this same reason, here, we take care to free only the polled messages
 * that had been somehow replied (only if not by chance already processed on
 * the IRQ path - the initial scmi_vio_msg_release() takes care of this) and
 * also any timed-out polled message if that indeed appears to have been at
 * least dequeued from the virtqueues (VIO_MSG_POLL_DONE): this is needed since
 * such messages won't be freed elsewhere. Any other polled message is marked
 * as VIO_MSG_POLL_TIMEOUT.
 *
 * Possible late replies to timed-out polled messages will be eventually freed
 * by RX callbacks if delivered on the IRQ path or by the deferred TX worker if
 * dequeued on some other polling path.
 *
 * @cinfo: SCMI channel info
 * @ret: Transmission return code
 * @xfer: Transfer descriptor
 */
static void virtio_mark_txdone(struct scmi_chan_info *cinfo, int ret,
                               struct scmi_xfer *xfer)
{
        unsigned long flags;
        struct scmi_vio_channel *vioch = cinfo->transport_info;
        struct scmi_vio_msg *msg = xfer->priv;

        if (!msg || !scmi_vio_channel_acquire(vioch))
                return;

        /* Ensure msg is unbound from xfer anyway at this point */
        smp_store_mb(xfer->priv, NULL);

        /* Must be a polled xfer and not already freed on the IRQ path */
        if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) {
                scmi_vio_channel_release(vioch);
                return;
        }

        spin_lock_irqsave(&msg->poll_lock, flags);
        /* Free timed-out polled messages only if already dequeued */
        if (ret != -ETIMEDOUT || msg->poll_status == VIO_MSG_POLL_DONE)
                scmi_vio_msg_release(vioch, msg);
        else if (msg->poll_status == VIO_MSG_POLLING)
                msg->poll_status = VIO_MSG_POLL_TIMEOUT;
        spin_unlock_irqrestore(&msg->poll_lock, flags);

        scmi_vio_channel_release(vioch);
}

/**
 * virtio_poll_done - Provide polling support for VirtIO transport
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being polled for.
 *
 * VirtIO core provides a polling mechanism based only on last used indexes:
 * this means that it is possible to poll the virtqueues waiting for something
 * new to arrive from the host side, but the only way to check if the freshly
 * arrived buffer was indeed what we were waiting for is to compare the newly
 * arrived message descriptor with the one we are polling on.
 *
 * As a consequence we may dequeue something different from the buffer we were
 * poll-waiting for: if that is the case, such early-fetched buffers are then
 * added to the @pending_cmds_list for later processing by a dedicated
 * deferred worker.
 *
 * So, basically, once something new is spotted we proceed to de-queue all the
 * freshly received used buffers until we find the one we were polling on, or
 * we have 'seemingly' emptied the virtqueue; if some buffers are still pending
 * in the vqueue at the end of the polling loop (possible due to inherent races
 * in virtqueues handling mechanisms), we similarly kick the deferred worker
 * and let it process those, to avoid indefinitely looping in the .poll_done
 * busy-waiting helper.
 *
 * Finally, we delegate to the deferred worker also the final freeing of any
 * timed-out reply to a polled message that we should dequeue.
 *
 * Note that, since we do NOT have a per-message suppress notification
 * mechanism, the message we are polling for could be alternatively delivered
 * via the usual IRQ callbacks on another core which happened to have IRQs
 * enabled while we are actively polling for it here: in such a case it will
 * be handled as such by scmi_rx_callback() and the polling loop in the SCMI
 * Core TX path will be transparently terminated anyway.
 *
 * Return: True once polling has successfully completed.
 */
static bool virtio_poll_done(struct scmi_chan_info *cinfo,
                             struct scmi_xfer *xfer)
{
        bool pending, found = false;
        unsigned int length, any_prefetched = 0;
        unsigned long flags;
        struct scmi_vio_msg *next_msg, *msg = xfer->priv;
        struct scmi_vio_channel *vioch = cinfo->transport_info;

        if (!msg)
                return true;

        /*
         * Processed already by other polling loop on another CPU ?
         *
         * Note that this message is acquired on the poll path so cannot vanish
         * while inside this loop iteration even if concurrently processed on
         * the IRQ path.
         *
         * Avoid acquiring poll_lock since poll_status can be changed
         * in a relevant manner only later in this same thread of execution:
         * any other possible changes made concurrently by other polling loops
         * or by a reply delivered on the IRQ path have no meaningful impact on
         * this loop iteration: in other words it is harmless to allow this
         * possible race, but let us avoid spinlocking with irqs off in this
         * initial part of the polling loop.
         */
        if (msg->poll_status == VIO_MSG_POLL_DONE)
                return true;

        if (!scmi_vio_channel_acquire(vioch))
                return true;

        /* Has cmdq index moved at all ? */
        pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
        if (!pending) {
                scmi_vio_channel_release(vioch);
                return false;
        }

        spin_lock_irqsave(&vioch->lock, flags);
        virtqueue_disable_cb(vioch->vqueue);

        /*
         * Process all new messages till the polled-for message is found OR
         * the vqueue is empty.
         */
        while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) {
                bool next_msg_done = false;

                /*
                 * Mark any dequeued buffer message as VIO_MSG_POLL_DONE so
                 * that it can be properly freed even on timeout in
                 * mark_txdone.
                 */
                spin_lock(&next_msg->poll_lock);
                if (next_msg->poll_status == VIO_MSG_POLLING) {
                        next_msg->poll_status = VIO_MSG_POLL_DONE;
                        next_msg_done = true;
                }
                spin_unlock(&next_msg->poll_lock);

                next_msg->rx_len = length;
                /* Is this the message we were polling for ? */
                if (next_msg == msg) {
                        found = true;
                        break;
                } else if (next_msg_done) {
                        /* Skip the rest if this was another polled msg */
                        continue;
                }

                /*
                 * Enqueue for later processing any non-polled message and any
                 * timed-out polled one that we happen to have dequeued.
                 */
                spin_lock(&next_msg->poll_lock);
                if (next_msg->poll_status == VIO_MSG_NOT_POLLED ||
                    next_msg->poll_status == VIO_MSG_POLL_TIMEOUT) {
                        spin_unlock(&next_msg->poll_lock);

                        any_prefetched++;
                        spin_lock(&vioch->pending_lock);
                        list_add_tail(&next_msg->list,
                                      &vioch->pending_cmds_list);
                        spin_unlock(&vioch->pending_lock);
                } else {
                        spin_unlock(&next_msg->poll_lock);
                }
        }

        /*
         * When the polling loop terminates successfully, anything else that
         * was queued in the meantime will be served by the deferred worker OR
         * by the normal IRQ/callback OR by other poll loops.
         *
         * If we are still looking for the polled reply, the polling index has
         * to be updated to the current vqueue last used index.
         */
        if (found) {
                pending = !virtqueue_enable_cb(vioch->vqueue);
        } else {
                msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
                pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
        }

        if (vioch->deferred_tx_wq && (any_prefetched || pending))
                queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work);

        spin_unlock_irqrestore(&vioch->lock, flags);

        scmi_vio_channel_release(vioch);

        return found;
}

static const struct scmi_transport_ops scmi_virtio_ops = {
        .link_supplier = virtio_link_supplier,
        .chan_available = virtio_chan_available,
        .chan_setup = virtio_chan_setup,
        .chan_free = virtio_chan_free,
        .get_max_msg = virtio_get_max_msg,
        .send_message = virtio_send_message,
        .fetch_response = virtio_fetch_response,
        .fetch_notification = virtio_fetch_notification,
        .mark_txdone = virtio_mark_txdone,
        .poll_done = virtio_poll_done,
};

static int scmi_vio_probe(struct virtio_device *vdev)
{
        struct device *dev = &vdev->dev;
        struct scmi_vio_channel *channels;
        bool have_vq_rx;
        int vq_cnt;
        int i;
        int ret;
        struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT];

        /* Only one SCMI VirtIO device allowed */
        if (scmi_vdev) {
                dev_err(dev,
                        "One SCMI Virtio device was already initialized: only one allowed.\n");
                return -EBUSY;
        }

        have_vq_rx = scmi_vio_have_vq_rx(vdev);
        vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1;

        channels = devm_kcalloc(dev, vq_cnt, sizeof(*channels), GFP_KERNEL);
        if (!channels)
                return -ENOMEM;

        if (have_vq_rx)
                channels[VIRTIO_SCMI_VQ_RX].is_rx = true;

        ret = virtio_find_vqs(vdev, vq_cnt, vqs, scmi_vio_complete_callbacks,
                              scmi_vio_vqueue_names, NULL);
        if (ret) {
                dev_err(dev, "Failed to get %d virtqueue(s)\n", vq_cnt);
                return ret;
        }

        for (i = 0; i < vq_cnt; i++) {
                unsigned int sz;

                spin_lock_init(&channels[i].lock);
                spin_lock_init(&channels[i].free_lock);
                INIT_LIST_HEAD(&channels[i].free_list);
                spin_lock_init(&channels[i].pending_lock);
                INIT_LIST_HEAD(&channels[i].pending_cmds_list);
                channels[i].vqueue = vqs[i];

                sz = virtqueue_get_vring_size(channels[i].vqueue);
                /* Tx messages need multiple descriptors. */
                if (!channels[i].is_rx)
                        sz /= DESCRIPTORS_PER_TX_MSG;

                if (sz > MSG_TOKEN_MAX) {
                        dev_info(dev,
                                 "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n",
                                 channels[i].is_rx ? "rx" : "tx",
                                 sz, MSG_TOKEN_MAX);
                        sz = MSG_TOKEN_MAX;
                }
                channels[i].max_msg = sz;
        }

        vdev->priv = channels;
        /* Ensure initialized scmi_vdev is visible */
        smp_store_mb(scmi_vdev, vdev);

        return 0;
}

static void scmi_vio_remove(struct virtio_device *vdev)
{
        /*
         * Once we get here, virtio_chan_free() will have already been called
         * by the SCMI core for any existing channel and, as a consequence, all
         * the virtio channels will have been already marked NOT ready, causing
         * any outstanding message on any vqueue to be ignored by complete_cb:
         * now we can just stop processing buffers and destroy the vqueues.
         */
        virtio_reset_device(vdev);
        vdev->config->del_vqs(vdev);
        /* Ensure scmi_vdev is visible as NULL */
        smp_store_mb(scmi_vdev, NULL);
}

static int scmi_vio_validate(struct virtio_device *vdev)
{
#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE
        if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
                dev_err(&vdev->dev,
                        "device does not comply with spec version 1.x\n");
                return -EINVAL;
        }
#endif
        return 0;
}

static unsigned int features[] = {
        VIRTIO_SCMI_F_P2A_CHANNELS,
};

static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID },
        { 0 }
};

static struct virtio_driver virtio_scmi_driver = {
        .driver.name = "scmi-virtio",
        .driver.owner = THIS_MODULE,
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
        .id_table = id_table,
        .probe = scmi_vio_probe,
        .remove = scmi_vio_remove,
        .validate = scmi_vio_validate,
};

static int __init virtio_scmi_init(void)
{
        return register_virtio_driver(&virtio_scmi_driver);
}

static void virtio_scmi_exit(void)
{
        unregister_virtio_driver(&virtio_scmi_driver);
}

const struct scmi_desc scmi_virtio_desc = {
        .transport_init = virtio_scmi_init,
        .transport_exit = virtio_scmi_exit,
        .ops = &scmi_virtio_ops,
        /* for non-realtime virtio devices */
        .max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS,
        .max_msg = 0, /* overridden by virtio_get_max_msg() */
        .max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE,
        .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE),
};