// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor(SCP)
 * and the Application Processors(AP). The Message Handling Unit(MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 */

#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include "common.h"
#include "notify.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>

enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
};

/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;

static DEFINE_IDR(scmi_requested_devices);
static DEFINE_MUTEX(scmi_requested_devices_mtx);

struct scmi_requested_dev {
	const struct scmi_device_id *id_table;
	struct list_head node;
};

/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 * @max_msg: Maximum number of messages that can be pending
 * @free_xfers: A free list of available-to-use xfers. It is initialized with
 *		a number of xfers equal to the maximum allowed in-flight
 *		messages.
 * @pending_xfers: A hashtable, indexed by msg_hdr.seq, used to keep all the
 *		   currently in-flight messages.
 */
struct scmi_xfers_info {
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
	int max_msg;
	struct hlist_head free_xfers;
	DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ);
};

/**
 * struct scmi_protocol_instance - Describe an initialized protocol instance.
 * @handle: Reference to the SCMI handle associated to this protocol instance.
 * @proto: A reference to the protocol descriptor.
 * @gid: A reference for per-protocol devres management.
 * @users: A refcount to track effective users of this protocol.
 * @priv: Reference for optional protocol private data.
 * @ph: An embedded protocol handle that will be passed down to protocol
 *	initialization code to identify this instance.
 *
 * Each protocol is initialized independently once for each SCMI platform
 * instance which is defined by DT and implemented by the SCMI server fw.
 */
struct scmi_protocol_instance {
	const struct scmi_handle	*handle;
	const struct scmi_protocol	*proto;
	void				*gid;
	refcount_t			users;
	void				*priv;
	struct scmi_protocol_handle	ph;
};

#define ph_to_pi(h)	container_of(h, struct scmi_protocol_instance, ph)

/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @rx_minfo: Universal Receive Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols: IDR for protocols' instance descriptors initialized for
 *	       this SCMI instance: populated on protocol's first attempted
 *	       usage.
 * @protocols_mtx: A mutex to protect protocols instances initialization.
 * @protocols_imp: List of protocols implemented, currently maximum of
 *		   scmi_revision_info.num_protocols elements allocated by the
 *		   base protocol
 * @active_protocols: IDR storing device_nodes for protocols actually defined
 *		      in the DT and confirmed as implemented by fw.
 * @atomic_threshold: Optional system wide DT-configured threshold, expressed
 *		      in microseconds, for atomic operations.
 *		      Only SCMI synchronous commands reported by the platform
 *		      to have an execution latency less than or equal to the
 *		      threshold should be considered for atomic mode operation:
 *		      such a decision is finally left up to the SCMI drivers.
 * @notify_priv: Pointer to private data structure specific to notifications.
 * @node: List head
 * @users: Number of users of this instance
 */
struct scmi_info {
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info tx_minfo;
	struct scmi_xfers_info rx_minfo;
	struct idr tx_idr;
	struct idr rx_idr;
	struct idr protocols;
	/* Ensure mutual exclusive access to protocols instance array */
	struct mutex protocols_mtx;
	u8 *protocols_imp;
	struct idr active_protocols;
	unsigned int atomic_threshold;
	void *notify_priv;
	struct list_head node;
	int users;
};

#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)

static const int scmi_linux_errmap[] = {
	/* better than switch case as long as return value is continuous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};

static inline int scmi_to_linux_errno(int errno)
{
	int err_idx = -errno;

	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
		return scmi_linux_errmap[err_idx];
	return -EIO;
}
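
/*
 * A quick illustrative sketch of the mapping above (not part of the driver
 * logic): any SCMI status outside the table range falls back to a generic
 * -EIO.
 *
 *	scmi_to_linux_errno(SCMI_SUCCESS);	returns 0
 *	scmi_to_linux_errno(SCMI_ERR_BUSY);	returns -EBUSY
 *	scmi_to_linux_errno(-42);		out of range, returns -EIO
 */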

void scmi_notification_instance_data_set(const struct scmi_handle *handle,
					 void *priv)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	info->notify_priv = priv;
	/* Ensure updated protocol private data is visible */
	smp_wmb();
}

void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	/* Ensure protocols_private_data has been updated */
	smp_rmb();
	return info->notify_priv;
}

/**
 * scmi_xfer_token_set - Reserve and set new token for the xfer at hand
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: The xfer to act upon
 *
 * Pick the next unused monotonically increasing token and set it into
 * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
 * reuse of freshly completed or timed-out xfers, thus mitigating the risk
 * of incorrect association of a late and expired xfer with a live in-flight
 * transaction, both happening to re-use the same token identifier.
 *
 * Since the platform is NOT required to answer our request in-order, we
 * should account for a few rare but possible scenarios:
 *
 *  - exactly 'next_token' may NOT be available, so pick xfer_id >= next_token
 *    using find_next_zero_bit() starting from candidate next_token bit
 *
 *  - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are used in-flight but
 *    there are plenty of free tokens at the start, so try a second pass
 *    using find_next_zero_bit() and starting from 0.
 *
 *  X = used in-flight
 *
 * Normal
 * ------
 *
 *		|- xfer_id picked
 *   -----------+----------------------------------------------------------
 *   | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
 *   ----------------------------------------------------------------------
 *		^
 *		|- next_token
 *
 * Out-of-order pending at start
 * -----------------------------
 *
 *	  |- xfer_id picked, last_token fixed
 *   -----+----------------------------------------------------------------
 *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
 *   ----------------------------------------------------------------------
 *    ^
 *    |- next_token
 *
 *
 * Out-of-order pending at end
 * ---------------------------
 *
 *	  |- xfer_id picked, last_token fixed
 *   -----+----------------------------------------------------------------
 *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
 *   ----------------------------------------------------------------------
 *								      ^
 *								      |- next_token
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 *
 * Return: 0 on Success or error
 */
static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
			       struct scmi_xfer *xfer)
{
	unsigned long xfer_id, next_token;

	/*
	 * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
	 * using the pre-allocated transfer_id as a base.
	 * Note that the global transfer_id is shared across all message types
	 * so there could be holes in the allocated set of monotonic sequence
	 * numbers, but that is going to limit the effectiveness of the
	 * mitigation only in very rare limit conditions.
	 */
	next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));

	/* Pick the next available xfer_id >= next_token */
	xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
				     MSG_TOKEN_MAX, next_token);
	if (xfer_id == MSG_TOKEN_MAX) {
		/*
		 * After heavily out-of-order responses, there are no free
		 * tokens ahead, but only at start of xfer_alloc_table so
		 * try again from the beginning.
		 */
		xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
					     MSG_TOKEN_MAX, 0);
		/*
		 * Something is wrong if we got here since there can be a
		 * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
		 * but we have not found any free token [0, MSG_TOKEN_MAX - 1].
		 */
		if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
			return -ENOMEM;
	}

	/* Update +/- last_token accordingly if we skipped some hole */
	if (xfer_id != next_token)
		atomic_add((int)(xfer_id - next_token), &transfer_last_id);

	/* Set in-flight */
	set_bit(xfer_id, minfo->xfer_alloc_table);
	xfer->hdr.seq = (u16)xfer_id;

	return 0;
}

/**
 * scmi_xfer_token_clear - Release the token
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: The xfer to act upon
 */
static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
					 struct scmi_xfer *xfer)
{
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
}

/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @set_pending: If true a monotonic token is picked and the xfer is added to
 *		 the pending hash table.
 *
 * Helper function which is used by various message functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * Picks an xfer from the free list @free_xfers (if any available) and, if
 * required, sets a monotonically increasing token and stores the inflight xfer
 * into the @pending_xfers hashtable for later retrieval.
 *
 * The successfully initialized xfer is refcounted.
 *
 * Context: Holds @xfer_lock while manipulating @xfer_alloc_table and
 *	    @free_xfers.
 *
 * Return: An initialized xfer if all went fine, else pointer error.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
				       struct scmi_xfers_info *minfo,
				       bool set_pending)
{
	int ret;
	unsigned long flags;
	struct scmi_xfer *xfer;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	if (hlist_empty(&minfo->free_xfers)) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}

	/* grab an xfer from the free_list */
	xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
	hlist_del_init(&xfer->node);

	/*
	 * Allocate transfer_id early so that can be used also as base for
	 * monotonic sequence number generation if needed.
	 */
	xfer->transfer_id = atomic_inc_return(&transfer_last_id);

	if (set_pending) {
		/* Pick and set monotonic token */
		ret = scmi_xfer_token_set(minfo, xfer);
		if (!ret) {
			hash_add(minfo->pending_xfers, &xfer->node,
				 xfer->hdr.seq);
			xfer->pending = true;
		} else {
			dev_err(handle->dev,
				"Failed to get monotonic token %d\n", ret);
			hlist_add_head(&xfer->node, &minfo->free_xfers);
			xfer = ERR_PTR(ret);
		}
	}

	if (!IS_ERR(xfer)) {
		refcount_set(&xfer->users, 1);
		atomic_set(&xfer->busy, SCMI_XFER_FREE);
	}
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	return xfer;
}

/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * After refcount check, possibly release an xfer, clearing the token slot,
 * removing xfer from @pending_xfers and putting it back into free_xfers.
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	if (refcount_dec_and_test(&xfer->users)) {
		if (xfer->pending) {
			scmi_xfer_token_clear(minfo, xfer);
			hash_del(&xfer->node);
			xfer->pending = false;
		}
		hlist_add_head(&xfer->node, &minfo->free_xfers);
	}
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}

/**
 * scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer_id: Token ID to lookup in @pending_xfers
 *
 * Refcounting is untouched.
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 *
 * Return: A valid xfer on Success or error otherwise
 */
static struct scmi_xfer *
scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
{
	struct scmi_xfer *xfer = NULL;

	if (test_bit(xfer_id, minfo->xfer_alloc_table))
		xfer = XFER_FIND(minfo->pending_xfers, xfer_id);

	return xfer ?: ERR_PTR(-EINVAL);
}

/**
 * scmi_msg_response_validate - Validate message type against state of related
 *	xfer
 *
 * @cinfo: A reference to the channel descriptor.
 * @msg_type: Message type to check
 * @xfer: A reference to the xfer to validate against @msg_type
 *
 * This function checks if @msg_type is congruent with the current state of
 * a pending @xfer; if an asynchronous delayed response is received before the
 * related synchronous response (Out-of-Order Delayed Response) the missing
 * synchronous response is assumed to be OK and completed, carrying on with the
 * Delayed Response: this is done to address the case in which the underlying
 * SCMI transport can deliver such out-of-order responses.
 *
 * Context: Assumes to be called with xfer->lock already acquired.
 *
 * Return: 0 on Success, error otherwise
 */
static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo,
					     u8 msg_type,
					     struct scmi_xfer *xfer)
{
	/*
	 * Even if a response was indeed expected on this slot at this point,
	 * a buggy platform could wrongly reply feeding us an unexpected
	 * delayed response we're not prepared to handle: bail-out safely
	 * blaming firmware.
	 */
	if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
		dev_err(cinfo->dev,
			"Delayed Response for %d not expected! Buggy F/W ?\n",
			xfer->hdr.seq);
		return -EINVAL;
	}

	switch (xfer->state) {
	case SCMI_XFER_SENT_OK:
		if (msg_type == MSG_TYPE_DELAYED_RESP) {
			/*
			 * Delayed Response expected but delivered earlier.
			 * Assume message RESPONSE was OK and skip state.
			 */
			xfer->hdr.status = SCMI_SUCCESS;
			xfer->state = SCMI_XFER_RESP_OK;
			complete(&xfer->done);
			dev_warn(cinfo->dev,
				 "Received valid OoO Delayed Response for %d\n",
				 xfer->hdr.seq);
		}
		break;
	case SCMI_XFER_RESP_OK:
		if (msg_type != MSG_TYPE_DELAYED_RESP)
			return -EINVAL;
		break;
	case SCMI_XFER_DRESP_OK:
		/* No further message expected once in SCMI_XFER_DRESP_OK */
		return -EINVAL;
	}

	return 0;
}

/**
 * scmi_xfer_state_update - Update xfer state
 *
 * @xfer: A reference to the xfer to update
 * @msg_type: Type of message being processed.
 *
 * Note that this message is assumed to have been already successfully
 * validated by @scmi_msg_response_validate(), so here we just update the
 * state.
 *
 * Context: Assumes to be called on an xfer exclusively acquired using the
 *	    busy flag.
 */
static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
{
	xfer->hdr.type = msg_type;

	/* Unknown command types were already discarded earlier */
	if (xfer->hdr.type == MSG_TYPE_COMMAND)
		xfer->state = SCMI_XFER_RESP_OK;
	else
		xfer->state = SCMI_XFER_DRESP_OK;
}
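
/**
 * scmi_xfer_acquired - Try to acquire exclusive access to an xfer
 *
 * @xfer: The xfer to act upon
 *
 * Atomically flips @xfer->busy from SCMI_XFER_FREE to SCMI_XFER_BUSY, so
 * that only one of the possibly concurrent RESP/DRESP processing paths can
 * win and own the xfer at any given time.
 *
 * Return: True if exclusive access was successfully acquired.
 */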
static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
{
	int ret;

	ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);

	return ret == SCMI_XFER_FREE;
}

/**
 * scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer
 *
 * @cinfo: A reference to the channel descriptor.
 * @msg_hdr: A message header to use as lookup key
 *
 * When a valid xfer is found for the sequence number embedded in the provided
 * msg_hdr, reference counting is properly updated and exclusive access to this
 * xfer is granted till released with @scmi_xfer_command_release.
 *
 * Return: A valid @xfer on Success or error otherwise.
 */
static inline struct scmi_xfer *
scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	int ret;
	unsigned long flags;
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);

	/* Are we even expecting this? */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
	if (IS_ERR(xfer)) {
		dev_err(cinfo->dev,
			"Message for %d type %d is not expected!\n",
			xfer_id, msg_type);
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return xfer;
	}
	refcount_inc(&xfer->users);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	spin_lock_irqsave(&xfer->lock, flags);
	ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
	/*
	 * If a pending xfer was found which was also in a congruent state with
	 * the received message, acquire exclusive access to it setting the busy
	 * flag.
	 * Spins only on the rare limit condition of concurrent reception of
	 * RESP and DRESP for the same xfer.
	 */
	if (!ret) {
		spin_until_cond(scmi_xfer_acquired(xfer));
		scmi_xfer_state_update(xfer, msg_type);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);

	if (ret) {
		dev_err(cinfo->dev,
			"Invalid message type:%d for %d - HDR:0x%X state:%d\n",
			msg_type, xfer_id, msg_hdr, xfer->state);
		/* On error the refcount incremented above has to be dropped */
		__scmi_xfer_put(minfo, xfer);
		xfer = ERR_PTR(-EINVAL);
	}

	return xfer;
}

static inline void scmi_xfer_command_release(struct scmi_info *info,
					     struct scmi_xfer *xfer)
{
	atomic_set(&xfer->busy, SCMI_XFER_FREE);
	__scmi_xfer_put(&info->tx_minfo, xfer);
}

static inline void scmi_clear_channel(struct scmi_info *info,
				      struct scmi_chan_info *cinfo)
{
	if (info->desc->ops->clear_channel)
		info->desc->ops->clear_channel(cinfo);
}

static inline bool is_polling_required(struct scmi_chan_info *cinfo,
				       struct scmi_info *info)
{
	return cinfo->no_completion_irq || info->desc->force_polling;
}

static inline bool is_transport_polling_capable(struct scmi_info *info)
{
	return info->desc->ops->poll_done ||
	       info->desc->sync_cmds_completed_on_ret;
}

static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
				      struct scmi_info *info)
{
	return is_polling_required(cinfo, info) &&
	       is_transport_polling_capable(info);
}

static void scmi_handle_notification(struct scmi_chan_info *cinfo,
				     u32 msg_hdr, void *priv)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->rx_minfo;
	ktime_t ts;

	ts = ktime_get_boottime();
	xfer = scmi_xfer_get(cinfo->handle, minfo, false);
	if (IS_ERR(xfer)) {
		dev_err(dev, "failed to get free message slot (%ld)\n",
			PTR_ERR(xfer));
		scmi_clear_channel(info, cinfo);
		return;
	}

	unpack_scmi_header(msg_hdr, &xfer->hdr);
	if (priv)
		/* Ensure order between xfer->priv store and following ops */
		smp_store_mb(xfer->priv, priv);
	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
					    xfer);
	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   MSG_TYPE_NOTIFICATION);

	__scmi_xfer_put(minfo, xfer);

	scmi_clear_channel(info, cinfo);
}

static void scmi_handle_response(struct scmi_chan_info *cinfo,
				 u32 msg_hdr, void *priv)
{
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
	if (IS_ERR(xfer)) {
		if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
			scmi_clear_channel(info, cinfo);
		return;
	}

	/* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
		xfer->rx.len = info->desc->max_msg_size;

	if (priv)
		/* Ensure order between xfer->priv store and following ops */
		smp_store_mb(xfer->priv, priv);
	info->desc->ops->fetch_response(cinfo, xfer);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   xfer->hdr.type);

	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
		scmi_clear_channel(info, cinfo);
		complete(xfer->async_done);
	} else {
		complete(&xfer->done);
	}

	scmi_xfer_command_release(info, xfer);
}

/**
 * scmi_rx_callback() - callback for receiving messages
 *
 * @cinfo: SCMI channel info
 * @msg_hdr: Message header
 * @priv: Transport specific private data.
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv)
{
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);

	switch (msg_type) {
	case MSG_TYPE_NOTIFICATION:
		scmi_handle_notification(cinfo, msg_hdr, priv);
		break;
	case MSG_TYPE_COMMAND:
	case MSG_TYPE_DELAYED_RESP:
		scmi_handle_response(cinfo, msg_hdr, priv);
		break;
	default:
		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
		break;
	}
}
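
/*
 * A minimal sketch of how a transport layer is expected to feed the above
 * callback from its own completion handler; loosely modelled on the mailbox
 * transport, with the scmi_mailbox/shmem helper names being assumptions of
 * this example rather than definitions from this file:
 *
 *	static void rx_callback(struct mbox_client *cl, void *m)
 *	{
 *		struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
 *
 *		scmi_rx_callback(smbox->cinfo,
 *				 shmem_read_header(smbox->shmem), NULL);
 *	}
 */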

/**
 * xfer_put() - Release a transmit message
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: message that was reserved by xfer_get_init
 */
static void xfer_put(const struct scmi_protocol_handle *ph,
		     struct scmi_xfer *xfer)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	__scmi_xfer_put(&info->tx_minfo, xfer);
}

static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	/*
	 * Poll also on xfer->done so that polling can be forcibly terminated
	 * in case of out-of-order receptions of delayed responses
	 */
	return info->desc->ops->poll_done(cinfo, xfer) ||
	       try_wait_for_completion(&xfer->done) ||
	       ktime_after(ktime_get(), stop);
}

/**
 * scmi_wait_for_message_response - A helper to group all the possible ways of
 *	waiting for a synchronous message response.
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being waited for.
 *
 * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
 * configuration flags like xfer->hdr.poll_completion.
 *
 * Return: 0 on Success, error otherwise.
 */
static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
					  struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct device *dev = info->dev;
	int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms;

	trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
				      xfer->hdr.protocol_id, xfer->hdr.seq,
				      timeout_ms,
				      xfer->hdr.poll_completion);

	if (xfer->hdr.poll_completion) {
		/*
		 * Real polling is needed only if transport has NOT declared
		 * itself to support synchronous commands replies.
		 */
		if (!info->desc->sync_cmds_completed_on_ret) {
			/*
			 * Poll on xfer using transport provided .poll_done();
			 * assumes no completion interrupt was available.
			 */
			ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);

			spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
								  xfer, stop));
			if (ktime_after(ktime_get(), stop)) {
				dev_err(dev,
					"timed out in resp(caller: %pS) - polling\n",
					(void *)_RET_IP_);
				ret = -ETIMEDOUT;
			}
		}

		if (!ret) {
			unsigned long flags;

			/*
			 * Do not fetch_response if an out-of-order delayed
			 * response is being processed.
			 */
			spin_lock_irqsave(&xfer->lock, flags);
			if (xfer->state == SCMI_XFER_SENT_OK) {
				info->desc->ops->fetch_response(cinfo, xfer);
				xfer->state = SCMI_XFER_RESP_OK;
			}
			spin_unlock_irqrestore(&xfer->lock, flags);
		}
	} else {
		/* And we wait for the response. */
		if (!wait_for_completion_timeout(&xfer->done,
						 msecs_to_jiffies(timeout_ms))) {
			dev_err(dev, "timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	}

	return ret;
}

/**
 * do_xfer() - Do one transfer
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	return corresponding error, else if all goes well,
 *	return 0.
 */
static int do_xfer(const struct scmi_protocol_handle *ph,
		   struct scmi_xfer *xfer)
{
	int ret;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	/* Check for polling request on custom command xfers at first */
	if (xfer->hdr.poll_completion && !is_transport_polling_capable(info)) {
		dev_warn_once(dev,
			      "Polling mode is not supported by transport.\n");
		return -EINVAL;
	}

	cinfo = idr_find(&info->tx_idr, pi->proto->id);
	if (unlikely(!cinfo))
		return -EINVAL;

	/* True ONLY if also supported by transport. */
	if (is_polling_enabled(cinfo, info))
		xfer->hdr.poll_completion = true;

	/*
	 * Initialise protocol id now from protocol handle to avoid it being
	 * overridden by mistake (or malice) by the protocol code mangling with
	 * the scmi_xfer structure prior to this.
	 */
	xfer->hdr.protocol_id = pi->proto->id;
	reinit_completion(&xfer->done);

	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
			      xfer->hdr.protocol_id, xfer->hdr.seq,
			      xfer->hdr.poll_completion);

	xfer->state = SCMI_XFER_SENT_OK;
	/*
	 * Even though spinlocking is not needed here since no race is possible
	 * on xfer->state due to the monotonically increasing tokens allocation,
	 * we must anyway ensure xfer->state initialization is not re-ordered
	 * after the .send_message() to be sure that on the RX path an early
	 * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
	 */
	smp_mb();

	ret = info->desc->ops->send_message(cinfo, xfer);
	if (ret < 0) {
		dev_dbg(dev, "Failed to send message %d\n", ret);
		return ret;
	}

	ret = scmi_wait_for_message_response(cinfo, xfer);
	if (!ret && xfer->hdr.status)
		ret = scmi_to_linux_errno(xfer->hdr.status);

	if (info->desc->ops->mark_txdone)
		info->desc->ops->mark_txdone(cinfo, ret, xfer);

	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);

	return ret;
}

static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
			      struct scmi_xfer *xfer)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	xfer->rx.len = info->desc->max_msg_size;
}

#define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)

/**
 * do_xfer_with_response() - Do one transfer and wait until the delayed
 *	response is received
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Using asynchronous commands in atomic/polling mode should be avoided since
 * it could cause long busy-waiting here, so ignore polling for the delayed
 * response and WARN if it was requested for this command transaction since
 * upper layers should refrain from issuing such kinds of requests.
 *
 * The only other option would have been to refrain from using any asynchronous
 * command even if made available, when an atomic transport is detected, and
 * instead forcibly use the synchronous version (something that can be easily
 * attained at the protocol layer), but this would also have led to longer
 * stalls of the channel for synchronous commands and possibly timeouts.
 * (In other words, there is usually a good reason if a platform provides an
 * asynchronous version of a command and we should prefer to use it...just not
 * when using atomic/polling mode.)
 *
 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
 *	return corresponding error, else if all goes well, return 0.
 */
static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
				 struct scmi_xfer *xfer)
{
	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
	DECLARE_COMPLETION_ONSTACK(async_response);

	xfer->async_done = &async_response;

	/*
	 * Delayed responses should not be polled, so an async command should
	 * not have been used when requiring an atomic/poll context; WARN and
	 * perform instead a sleeping wait.
	 * (Note Async + IgnoreDelayedResponses are sent via do_xfer)
	 */
	WARN_ON_ONCE(xfer->hdr.poll_completion);

	ret = do_xfer(ph, xfer);
	if (!ret) {
		if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
			dev_err(ph->dev,
				"timed out in delayed resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		} else if (xfer->hdr.status) {
			ret = scmi_to_linux_errno(xfer->hdr.status);
		}
	}

	xfer->async_done = NULL;
	return ret;
}
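
/*
 * An illustrative sketch of how a protocol issues an asynchronous command
 * through this helper; the command ID and payload here are hypothetical:
 *
 *	struct scmi_xfer *t;
 *
 *	ret = ph->xops->xfer_get_init(ph, MY_ASYNC_CMD, sizeof(__le32),
 *				      0, &t);
 *	if (ret)
 *		return ret;
 *
 *	put_unaligned_le32(value, t->tx.buf);
 *	ret = ph->xops->do_xfer_with_response(ph, t);
 *	ph->xops->xfer_put(ph, t);
 */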

/**
 * xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @ph: Pointer to SCMI protocol handle
 * @msg_id: Message identifier
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	corresponding error.
 */
static int xfer_get_init(const struct scmi_protocol_handle *ph,
			 u8 msg_id, size_t tx_size, size_t rx_size,
			 struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(pi->handle, minfo, true);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	xfer->tx.len = tx_size;
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.type = MSG_TYPE_COMMAND;
	xfer->hdr.id = msg_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}

/**
 * version_get() - command to get the revision of the SCMI entity
 *
 * @ph: Pointer to SCMI protocol handle
 * @version: Holds returned version of protocol.
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
	if (ret)
		return ret;

	ret = do_xfer(ph, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	xfer_put(ph, t);
	return ret;
}

/**
 * scmi_set_protocol_priv - Set protocol specific data at init time
 *
 * @ph: A reference to the protocol handle.
 * @priv: The private data to set.
 *
 * Return: 0 on Success
 */
static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
				  void *priv)
{
	struct scmi_protocol_instance *pi = ph_to_pi(ph);

	pi->priv = priv;

	return 0;
}

/**
 * scmi_get_protocol_priv - Get protocol specific data set at init time
 *
 * @ph: A reference to the protocol handle.
 *
 * Return: Protocol private data if any was set.
 */
static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);

	return pi->priv;
}

static const struct scmi_xfer_ops xfer_ops = {
	.version_get = version_get,
	.xfer_get_init = xfer_get_init,
	.reset_rx_to_maxsz = reset_rx_to_maxsz,
	.do_xfer = do_xfer,
	.do_xfer_with_response = do_xfer_with_response,
	.xfer_put = xfer_put,
};

struct scmi_msg_resp_domain_name_get {
	__le32 flags;
	u8 name[SCMI_MAX_STR_SIZE];
};

/**
 * scmi_common_extended_name_get - Common helper to get extended resources name
 * @ph: A protocol handle reference.
 * @cmd_id: The specific command ID to use.
 * @res_id: The specific resource ID to use.
 * @name: A pointer to the preallocated area where the retrieved name will be
 *	  stored as a NULL terminated string.
 * @len: The length in bytes of the @name char array.
 *
 * Return: 0 on Success
 */
static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph,
					 u8 cmd_id, u32 res_id, char *name,
					 size_t len)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_domain_name_get *resp;

	ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(res_id),
				      sizeof(*resp), &t);
	if (ret)
		goto out;

	put_unaligned_le32(res_id, t->tx.buf);
	resp = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		strscpy(name, resp->name, len);

	ph->xops->xfer_put(ph, t);
out:
	if (ret)
		dev_warn(ph->dev,
			 "Failed to get extended name - id:%u (ret:%d). Using %s\n",
			 res_id, ret, name);
	return ret;
}

/**
 * struct scmi_iterator - Iterator descriptor
 * @msg: A reference to the message TX buffer; filled by @prepare_message with
 *	 a proper custom command payload for each multi-part command request.
 * @resp: A reference to the response RX buffer; used by @update_state and
 *	  @process_response to parse the multi-part replies.
 * @t: A reference to the underlying xfer initialized and used transparently by
 *     the iterator internal routines.
 * @ph: A reference to the associated protocol handle to be used.
 * @ops: A reference to the custom provided iterator operations.
 * @state: The current iterator state; used and updated in turn by the
 *	   iterator's internal routines and by the caller-provided
 *	   @scmi_iterator_ops.
 * @priv: A reference to optional private data as provided by the caller and
 *	  passed back to the @scmi_iterator_ops.
 */
struct scmi_iterator {
	void *msg;
	void *resp;
	struct scmi_xfer *t;
	const struct scmi_protocol_handle *ph;
	struct scmi_iterator_ops *ops;
	struct scmi_iterator_state state;
	void *priv;
};

static void *scmi_iterator_init(const struct scmi_protocol_handle *ph,
				struct scmi_iterator_ops *ops,
				unsigned int max_resources, u8 msg_id,
				size_t tx_size, void *priv)
{
	int ret;
	struct scmi_iterator *i;

	i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL);
	if (!i)
		return ERR_PTR(-ENOMEM);

	i->ph = ph;
	i->ops = ops;
	i->priv = priv;

	ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t);
	if (ret) {
		devm_kfree(ph->dev, i);
		return ERR_PTR(ret);
	}

	i->state.max_resources = max_resources;
	i->msg = i->t->tx.buf;
	i->resp = i->t->rx.buf;

	return i;
}

static int scmi_iterator_run(void *iter)
{
	int ret = -EINVAL;
	struct scmi_iterator_ops *iops;
	const struct scmi_protocol_handle *ph;
	struct scmi_iterator_state *st;
	struct scmi_iterator *i = iter;

	if (!i || !i->ops || !i->ph)
		return ret;

	iops = i->ops;
	ph = i->ph;
	st = &i->state;

	do {
		iops->prepare_message(i->msg, st->desc_index, i->priv);
		ret = ph->xops->do_xfer(ph, i->t);
		if (ret)
			break;

		st->rx_len = i->t->rx.len;
		ret = iops->update_state(st, i->resp, i->priv);
		if (ret)
			break;

		if (st->num_returned > st->max_resources - st->desc_index) {
			dev_err(ph->dev,
				"No. of resources can't exceed %d\n",
				st->max_resources);
			ret = -EINVAL;
			break;
		}

		for (st->loop_idx = 0; st->loop_idx < st->num_returned;
		     st->loop_idx++) {
			ret = iops->process_response(ph, i->resp, st, i->priv);
			if (ret)
				goto out;
		}

		st->desc_index += st->num_returned;
		ph->xops->reset_rx_to_maxsz(ph, i->t);
		/*
		 * check for both returned and remaining to avoid infinite
		 * loop due to buggy firmware
		 */
	} while (st->num_returned && st->num_remaining);

out:
	/* Finalize and destroy iterator */
	ph->xops->xfer_put(ph, i->t);
	devm_kfree(ph->dev, i);

	return ret;
}

static const struct scmi_proto_helpers_ops helpers_ops = {
	.extended_name_get = scmi_common_extended_name_get,
	.iter_response_init = scmi_iterator_init,
	.iter_response_run = scmi_iterator_run,
};
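
/*
 * A minimal usage sketch of the iterator helpers from a protocol's point of
 * view; the three ops callbacks, the command ID, the message layout and the
 * private context are hypothetical placeholders:
 *
 *	static struct scmi_iterator_ops my_iter_ops = {
 *		.prepare_message = my_prepare_message,
 *		.update_state = my_update_state,
 *		.process_response = my_process_response,
 *	};
 *
 *	void *iter;
 *
 *	iter = ph->hops->iter_response_init(ph, &my_iter_ops, num_domains,
 *					    MY_MULTIPART_CMD,
 *					    sizeof(struct my_msg), &priv);
 *	if (IS_ERR(iter))
 *		return PTR_ERR(iter);
 *
 *	return ph->hops->iter_response_run(iter);
 */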

/**
 * scmi_revision_area_get - Retrieve version memory area.
 *
 * @ph: A reference to the protocol handle.
 *
 * A helper to grab the version memory area reference during SCMI Base protocol
 * initialization.
 *
 * Return: A reference to the version memory area associated to the SCMI
 *	   instance underlying this protocol handle.
 */
struct scmi_revision_info *
scmi_revision_area_get(const struct scmi_protocol_handle *ph)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);

	return pi->handle->version;
}

/**
 * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
 *	instance descriptor.
 * @info: The reference to the related SCMI instance.
 * @proto: The protocol descriptor.
 *
 * Allocate a new protocol instance descriptor, using the provided @proto
 * description, against the specified SCMI instance @info, and initialize it;
 * all resources management is handled via a dedicated per-protocol devres
 * group.
 *
 * Context: Assumes to be called with @protocols_mtx already acquired.
 * Return: A reference to a freshly allocated and initialized protocol instance
 *	   or ERR_PTR on failure. On failure the @proto reference is at first
 *	   put using @scmi_protocol_put() before releasing all the devres group.
 */
static struct scmi_protocol_instance *
scmi_alloc_init_protocol_instance(struct scmi_info *info,
				  const struct scmi_protocol *proto)
{
	int ret = -ENOMEM;
	void *gid;
	struct scmi_protocol_instance *pi;
	const struct scmi_handle *handle = &info->handle;

	/* Protocol specific devres group */
	gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
	if (!gid) {
		scmi_protocol_put(proto->id);
		goto out;
	}

	pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
	if (!pi)
		goto clean;

	pi->gid = gid;
	pi->proto = proto;
	pi->handle = handle;
	pi->ph.dev = handle->dev;
	pi->ph.xops = &xfer_ops;
	pi->ph.hops = &helpers_ops;
	pi->ph.set_priv = scmi_set_protocol_priv;
	pi->ph.get_priv = scmi_get_protocol_priv;
	refcount_set(&pi->users, 1);
	/* proto->init is assured NON NULL by scmi_protocol_register */
	ret = pi->proto->instance_init(&pi->ph);
	if (ret)
		goto clean;

	ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
			GFP_KERNEL);
	if (ret != proto->id)
		goto clean;

	/*
	 * Warn but ignore events registration errors since we do not want
	 * to skip whole protocols if their notifications are messed up.
	 */
	if (pi->proto->events) {
		ret = scmi_register_protocol_events(handle, pi->proto->id,
						    &pi->ph,
						    pi->proto->events);
		if (ret)
			dev_warn(handle->dev,
				 "Protocol:%X - Events Registration Failed - err:%d\n",
				 pi->proto->id, ret);
	}

	devres_close_group(handle->dev, pi->gid);
	dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);

	return pi;

clean:
	/* Take care to put the protocol module's owner before releasing all */
	scmi_protocol_put(proto->id);
	devres_release_group(handle->dev, gid);
out:
	return ERR_PTR(ret);
}

/**
 * scmi_get_protocol_instance - Protocol initialization helper.
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * In case the required protocol has never been requested before for this
 * instance, allocate and initialize all the needed structures while handling
 * resource allocation with a dedicated per-protocol devres subgroup.
 *
 * Return: A reference to an initialized protocol instance or error on failure:
 *	   in particular returns -EPROBE_DEFER when the desired protocol could
 *	   NOT be found.
 */
static struct scmi_protocol_instance * __must_check
scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
{
	struct scmi_protocol_instance *pi;
	struct scmi_info *info = handle_to_scmi_info(handle);

	mutex_lock(&info->protocols_mtx);
	pi = idr_find(&info->protocols, protocol_id);

	if (pi) {
		refcount_inc(&pi->users);
	} else {
		const struct scmi_protocol *proto;

		/* Fails if protocol not registered on bus */
		proto = scmi_protocol_get(protocol_id);
		if (proto)
			pi = scmi_alloc_init_protocol_instance(info, proto);
		else
			pi = ERR_PTR(-EPROBE_DEFER);
	}
	mutex_unlock(&info->protocols_mtx);

	return pi;
}

/**
 * scmi_protocol_acquire - Protocol acquire
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * Register a new user for the requested protocol on the specified SCMI
 * platform instance, possibly triggering its initialization on first user.
 *
 * Return: 0 if protocol was acquired successfully.
 */
int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
{
	return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
}

/**
 * scmi_protocol_release - Protocol de-initialization helper.
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * Remove one user for the specified protocol and trigger de-initialization
 * and resource de-allocation once the last user has gone.
 */
void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
{
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_protocol_instance *pi;

	mutex_lock(&info->protocols_mtx);
	pi = idr_find(&info->protocols, protocol_id);
	if (WARN_ON(!pi))
		goto out;

	if (refcount_dec_and_test(&pi->users)) {
		void *gid = pi->gid;

		if (pi->proto->events)
			scmi_deregister_protocol_events(handle, protocol_id);

		if (pi->proto->instance_deinit)
			pi->proto->instance_deinit(&pi->ph);

		idr_remove(&info->protocols, protocol_id);

		scmi_protocol_put(protocol_id);

		devres_release_group(handle->dev, gid);
		dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
			protocol_id);
	}

out:
	mutex_unlock(&info->protocols_mtx);
}

void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
				     u8 *prot_imp)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	info->protocols_imp = prot_imp;
}

static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
	int i;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_revision_info *rev = handle->version;

	if (!info->protocols_imp)
		return false;

	for (i = 0; i < rev->num_protocols; i++)
		if (info->protocols_imp[i] == prot_id)
			return true;
	return false;
}

struct scmi_protocol_devres {
	const struct scmi_handle *handle;
	u8 protocol_id;
};

static void scmi_devm_release_protocol(struct device *dev, void *res)
{
	struct scmi_protocol_devres *dres = res;

	scmi_protocol_release(dres->handle, dres->protocol_id);
}

/**
 * scmi_devm_protocol_get - Devres managed get protocol operations and handle
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @protocol_id: The protocol being requested.
 * @ph: A pointer reference used to pass back the associated protocol handle.
 *
 * Get hold of a protocol accounting for its usage, possibly triggering its
 * initialization, and returning the protocol specific operations and related
 * protocol handle which will be used as first argument in most of the
 * protocols operations methods.
 * Being a devres based managed method, protocol hold will be automatically
 * released, and possibly de-initialized on last user, once the SCMI driver
 * owning the scmi_device is unbound from it.
 *
 * Return: A reference to the requested protocol operations or error.
 *	   Must be checked for errors by caller.
 */
static const void __must_check *
scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
		       struct scmi_protocol_handle **ph)
{
	struct scmi_protocol_instance *pi;
	struct scmi_protocol_devres *dres;
	struct scmi_handle *handle = sdev->handle;

	if (!ph)
		return ERR_PTR(-EINVAL);

	dres = devres_alloc(scmi_devm_release_protocol,
			    sizeof(*dres), GFP_KERNEL);
	if (!dres)
		return ERR_PTR(-ENOMEM);

	pi = scmi_get_protocol_instance(handle, protocol_id);
	if (IS_ERR(pi)) {
		devres_free(dres);
		return pi;
	}

	dres->handle = handle;
	dres->protocol_id = protocol_id;
	devres_add(&sdev->dev, dres);

	*ph = &pi->ph;

	return pi->proto->ops;
}
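
/*
 * A sketch of the typical probe path of an SCMI driver using the devres
 * managed get; the protocol ops type and protocol ID are placeholders,
 * while the call shape assumes the above method is exposed to drivers as
 * handle->devm_protocol_get:
 *
 *	static int scmi_foo_probe(struct scmi_device *sdev)
 *	{
 *		const struct scmi_foo_proto_ops *foo_ops;
 *		struct scmi_protocol_handle *ph;
 *
 *		foo_ops = sdev->handle->devm_protocol_get(sdev,
 *							  SCMI_PROTOCOL_FOO,
 *							  &ph);
 *		if (IS_ERR(foo_ops))
 *			return PTR_ERR(foo_ops);
 *
 *		...use foo_ops and ph; the protocol hold is dropped
 *		   automatically when the driver is unbound...
 *	}
 */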

static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
{
	struct scmi_protocol_devres *dres = res;

	if (WARN_ON(!dres || !data))
		return 0;

	return dres->protocol_id == *((u8 *)data);
}

/**
 * scmi_devm_protocol_put - Devres managed put protocol operations and handle
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @protocol_id: The protocol being requested.
 *
 * Explicitly release a protocol hold previously obtained calling the above
 * @scmi_devm_protocol_get.
 */
static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
{
	int ret;

	ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
			     scmi_devm_protocol_match, &protocol_id);
	WARN_ON(ret);
}

/**
 * scmi_is_transport_atomic - Method to check if underlying transport for an
 * SCMI instance is configured as atomic.
 *
 * @handle: A reference to the SCMI platform instance.
 * @atomic_threshold: An optional return value for the system wide currently
 *		      configured threshold for atomic operations.
 *
 * Return: True if transport is configured as atomic
 */
static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
				     unsigned int *atomic_threshold)
{
	bool ret;
	struct scmi_info *info = handle_to_scmi_info(handle);

	ret = info->desc->atomic_enabled && is_transport_polling_capable(info);
	if (ret && atomic_threshold)
		*atomic_threshold = info->atomic_threshold;

	return ret;
}
1599
1600 static inline
scmi_handle_get_from_info_unlocked(struct scmi_info * info)1601 struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
1602 {
1603 info->users++;
1604 return &info->handle;
1605 }

/**
 * scmi_handle_get() - Get the SCMI handle for a device
 *
 * @dev: pointer to device for which we want SCMI handle
 *
 * NOTE: The function does not track individual clients of the framework;
 * the get/put balance is expected to be maintained by the callers of the
 * SCMI protocol library: each successful scmi_handle_get() must be paired
 * with an scmi_handle_put().
 *
 * Return: pointer to handle if successful, NULL on error
 */
struct scmi_handle *scmi_handle_get(struct device *dev)
{
	struct list_head *p;
	struct scmi_info *info;
	struct scmi_handle *handle = NULL;

	mutex_lock(&scmi_list_mutex);
	list_for_each(p, &scmi_list) {
		info = list_entry(p, struct scmi_info, node);
		if (dev->parent == info->dev) {
			handle = scmi_handle_get_from_info_unlocked(info);
			break;
		}
	}
	mutex_unlock(&scmi_list_mutex);

	return handle;
}

/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * @handle: handle acquired by scmi_handle_get
 *
 * NOTE: The function does not track individual clients of the framework;
 * the get/put balance is expected to be maintained by the callers of the
 * SCMI protocol library: each successful scmi_handle_get() must be paired
 * with an scmi_handle_put().
 *
 * Return: 0 if the handle was successfully released, -EINVAL if a NULL
 *	   handle was passed.
 */
int scmi_handle_put(const struct scmi_handle *handle)
{
	struct scmi_info *info;

	if (!handle)
		return -EINVAL;

	info = handle_to_scmi_info(handle);
	mutex_lock(&scmi_list_mutex);
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&scmi_list_mutex);

	return 0;
}

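/**
 * __scmi_xfer_info_init() - Initialize the xfer bookkeeping for one
 * transfer area.
 *
 * @sinfo: The SCMI instance descriptor
 * @info: The Tx or Rx transfer area to initialize
 *
 * Pre-allocate @info->max_msg xfers along with their message buffers, park
 * them all on the free list, and prepare the token bitmap and the hashtable
 * used to track in-flight messages.
 *
 * Return: 0 on Success
 */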
static int __scmi_xfer_info_init(struct scmi_info *sinfo,
				 struct scmi_xfers_info *info)
{
	int i;
	struct scmi_xfer *xfer;
	struct device *dev = sinfo->dev;
	const struct scmi_desc *desc = sinfo->desc;

	/* Pre-allocated messages, no more than what hdr.seq can support */
	if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
		dev_err(dev,
			"Invalid maximum messages %d, not in range [1 - %lu]\n",
			info->max_msg, MSG_TOKEN_MAX);
		return -EINVAL;
	}

	hash_init(info->pending_xfers);

	/* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(MSG_TOKEN_MAX),
					      sizeof(long), GFP_KERNEL);
	if (!info->xfer_alloc_table)
		return -ENOMEM;

	/*
	 * Preallocate a number of xfers equal to max inflight messages,
	 * pre-initialize the buffer pointer to pre-allocated buffers and
	 * attach all of them to the free list
	 */
	INIT_HLIST_HEAD(&info->free_xfers);
	for (i = 0; i < info->max_msg; i++) {
		xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
		if (!xfer)
			return -ENOMEM;

		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
					    GFP_KERNEL);
		if (!xfer->rx.buf)
			return -ENOMEM;

		xfer->tx.buf = xfer->rx.buf;
		init_completion(&xfer->done);
		spin_lock_init(&xfer->lock);

		/* Add initialized xfer to the free list */
		hlist_add_head(&xfer->node, &info->free_xfers);
	}

	spin_lock_init(&info->xfer_lock);

	return 0;
}

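/**
 * scmi_channels_max_msg_configure() - Size the Tx/Rx transfer areas
 *
 * @sinfo: The SCMI instance descriptor
 *
 * Use the transport-specific .get_max_msg() operation, when provided, to
 * size the Tx and the optional Rx transfer areas; fall back to the static
 * per-transport descriptor value otherwise.
 *
 * Return: 0 on Success
 */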
static int scmi_channels_max_msg_configure(struct scmi_info *sinfo)
{
	const struct scmi_desc *desc = sinfo->desc;

	if (!desc->ops->get_max_msg) {
		sinfo->tx_minfo.max_msg = desc->max_msg;
		sinfo->rx_minfo.max_msg = desc->max_msg;
	} else {
		struct scmi_chan_info *base_cinfo;

		base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
		if (!base_cinfo)
			return -EINVAL;
		sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);

		/* RX channel is optional so can be skipped */
		base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
		if (base_cinfo)
			sinfo->rx_minfo.max_msg =
				desc->ops->get_max_msg(base_cinfo);
	}

	return 0;
}

static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
	int ret;

	ret = scmi_channels_max_msg_configure(sinfo);
	if (ret)
		return ret;

	ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
	if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
		ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);

	return ret;
}

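/**
 * scmi_chan_setup() - Setup and initialize one transport channel
 *
 * @info: The SCMI instance descriptor
 * @dev: The device associated to this channel
 * @prot_id: The protocol this channel is dedicated to
 * @tx: Flag to distinguish between the Tx and the optional Rx channel
 *
 * Reuse an already allocated channel when one exists for @prot_id; when the
 * transport does not provide a dedicated channel for this protocol, fall
 * back to sharing the base protocol channel.
 *
 * Return: 0 on Success
 */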
static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
			   int prot_id, bool tx)
{
	int ret, idx;
	struct scmi_chan_info *cinfo;
	struct idr *idr;

	/* Transmit channel is first entry i.e. index 0 */
	idx = tx ? 0 : 1;
	idr = tx ? &info->tx_idr : &info->rx_idr;

	/* check if already allocated, used for multiple device per protocol */
	cinfo = idr_find(idr, prot_id);
	if (cinfo)
		return 0;

	if (!info->desc->ops->chan_available(dev, idx)) {
		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
			return -EINVAL;
		goto idr_alloc;
	}

	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	cinfo->dev = dev;

	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
	if (ret)
		return ret;

	if (tx && is_polling_required(cinfo, info)) {
		if (is_transport_polling_capable(info))
			dev_info(dev,
				 "Enabled polling mode TX channel - prot_id:%d\n",
				 prot_id);
		else
			dev_warn(dev,
				 "Polling mode NOT supported by transport.\n");
	}

idr_alloc:
	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
	if (ret != prot_id) {
		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
		return ret;
	}

	cinfo->handle = &info->handle;
	return 0;
}

static inline int
scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
	int ret = scmi_chan_setup(info, dev, prot_id, true);

	if (!ret) /* Rx is optional, hence no error check */
		scmi_chan_setup(info, dev, prot_id, false);

	return ret;
}

/**
 * scmi_get_protocol_device - Helper to get/create an SCMI device.
 *
 * @np: A device node representing a valid active protocol for the referred
 * SCMI instance.
 * @info: The referred SCMI instance for which we are getting/creating this
 * device.
 * @prot_id: The protocol ID.
 * @name: The device name.
 *
 * Referring to the specific SCMI instance identified by @info, this helper
 * takes care to return a properly initialized device matching the requested
 * @prot_id and @name: if the device does not exist yet, it is created as a
 * child of the specified SCMI instance @info and its transport properly
 * initialized as usual.
 *
 * Return: A properly initialized scmi device, NULL otherwise.
 */
static inline struct scmi_device *
scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
			 int prot_id, const char *name)
{
	struct scmi_device *sdev;

	/* Already created for this parent SCMI instance ? */
	sdev = scmi_child_dev_find(info->dev, prot_id, name);
	if (sdev)
		return sdev;

	pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);

	sdev = scmi_device_create(np, info->dev, prot_id, name);
	if (!sdev) {
		dev_err(info->dev, "failed to create %d protocol device\n",
			prot_id);
		return NULL;
	}

	if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
		dev_err(&sdev->dev, "failed to setup transport\n");
		scmi_device_destroy(sdev);
		return NULL;
	}

	return sdev;
}

static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
			    int prot_id, const char *name)
{
	struct scmi_device *sdev;

	sdev = scmi_get_protocol_device(np, info, prot_id, name);
	if (!sdev)
		return;

	/* setup handle now as the transport is ready */
	scmi_set_handle(sdev);
}

/**
 * scmi_create_protocol_devices - Create devices for all pending requests for
 * this SCMI instance.
 *
 * @np: The device node describing the protocol
 * @info: The SCMI instance descriptor
 * @prot_id: The protocol ID
 *
 * All devices previously requested for this instance (if any) are found and
 * created by scanning the proper &scmi_requested_devices entry.
 */
static void scmi_create_protocol_devices(struct device_node *np,
					 struct scmi_info *info, int prot_id)
{
	struct list_head *phead;

	mutex_lock(&scmi_requested_devices_mtx);
	phead = idr_find(&scmi_requested_devices, prot_id);
	if (phead) {
		struct scmi_requested_dev *rdev;

		list_for_each_entry(rdev, phead, node)
			scmi_create_protocol_device(np, info, prot_id,
						    rdev->id_table->name);
	}
	mutex_unlock(&scmi_requested_devices_mtx);
}

/**
 * scmi_protocol_device_request - Helper to request a device
 *
 * @id_table: A protocol/name pair descriptor for the device to be created.
 *
 * This helper lets an SCMI driver request that a specific device, identified
 * by the @id_table, be created for each active SCMI instance.
 *
 * The requested device name MUST NOT already exist for any protocol;
 * at first the freshly requested @id_table is annotated in the IDR table
 * &scmi_requested_devices, then a matching device is created for each
 * already active SCMI instance (if any).
 *
 * This way the requested device is created straight away for all the already
 * initialized (probed) SCMI instances (handles), and it also remains
 * annotated as pending creation in case the requesting SCMI driver was
 * loaded before some SCMI instance and its related transport were available:
 * when such a late instance is probed, its probe will take care to scan the
 * list of pending requested devices and create those on its own (see
 * scmi_create_protocol_devices() and its enclosing loop).
 *
 * Return: 0 on Success
 */
int scmi_protocol_device_request(const struct scmi_device_id *id_table)
{
	int ret = 0;
	unsigned int id = 0;
	struct list_head *head, *phead = NULL;
	struct scmi_requested_dev *rdev;
	struct scmi_info *info;

	pr_debug("Requesting SCMI device (%s) for protocol %x\n",
		 id_table->name, id_table->protocol_id);

	/*
	 * Search for the matching protocol rdev list and then search
	 * for any existing device with the same name: the request fails
	 * if any duplicate is found.
	 */
	mutex_lock(&scmi_requested_devices_mtx);
	idr_for_each_entry(&scmi_requested_devices, head, id) {
		if (!phead) {
			/* A list found registered in the IDR is never empty */
			rdev = list_first_entry(head, struct scmi_requested_dev,
						node);
			if (rdev->id_table->protocol_id ==
			    id_table->protocol_id)
				phead = head;
		}
		list_for_each_entry(rdev, head, node) {
			if (!strcmp(rdev->id_table->name, id_table->name)) {
				pr_err("Ignoring duplicate request [%d] %s\n",
				       rdev->id_table->protocol_id,
				       rdev->id_table->name);
				ret = -EINVAL;
				goto out;
			}
		}
	}

	/*
	 * No duplicate found for requested id_table, so let's create a new
	 * requested device entry for this new valid request.
	 */
	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
	if (!rdev) {
		ret = -ENOMEM;
		goto out;
	}
	rdev->id_table = id_table;

	/*
	 * Append the new requested device table descriptor to the head of the
	 * related protocol list, eventually creating such head if not already
	 * there.
	 */
	if (!phead) {
		phead = kzalloc(sizeof(*phead), GFP_KERNEL);
		if (!phead) {
			kfree(rdev);
			ret = -ENOMEM;
			goto out;
		}
		INIT_LIST_HEAD(phead);

		ret = idr_alloc(&scmi_requested_devices, (void *)phead,
				id_table->protocol_id,
				id_table->protocol_id + 1, GFP_KERNEL);
		if (ret != id_table->protocol_id) {
			pr_err("Failed to save SCMI device - ret:%d\n", ret);
			kfree(rdev);
			kfree(phead);
			ret = -EINVAL;
			goto out;
		}
		ret = 0;
	}
	list_add(&rdev->node, phead);

	/*
	 * Now effectively create and initialize the requested device for every
	 * already initialized SCMI instance which has registered the requested
	 * protocol as a valid active one: i.e. defined in DT and supported by
	 * current platform FW.
	 */
	mutex_lock(&scmi_list_mutex);
	list_for_each_entry(info, &scmi_list, node) {
		struct device_node *child;

		child = idr_find(&info->active_protocols,
				 id_table->protocol_id);
		if (child) {
			struct scmi_device *sdev;

			sdev = scmi_get_protocol_device(child, info,
							id_table->protocol_id,
							id_table->name);
			/* Set handle if not already set: device existed */
			if (sdev && !sdev->handle)
				sdev->handle =
					scmi_handle_get_from_info_unlocked(info);
		} else {
			dev_err(info->dev,
				"Failed. SCMI protocol %d not active.\n",
				id_table->protocol_id);
		}
	}
	mutex_unlock(&scmi_list_mutex);

out:
	mutex_unlock(&scmi_requested_devices_mtx);

	return ret;
}
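
/*
 * Example (illustrative): this request path is typically entered on behalf
 * of an SCMI driver registration carrying an id_table such as the following
 * (the device name below is hypothetical):
 *
 *	static const struct scmi_device_id my_id_table[] = {
 *		{ SCMI_PROTOCOL_PERF, "my-perf-dev" },
 *		{ },
 *	};
 */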

/**
 * scmi_protocol_device_unrequest - Helper to unrequest a device
 *
 * @id_table: A protocol/name pair descriptor for the device to be unrequested.
 *
 * A helper to let an SCMI driver release its request about devices; note that
 * devices are created and initialized once the first SCMI driver requests
 * them, but they are destroyed only on SCMI core unloading/unbinding.
 *
 * The current SCMI transport layer uses such devices as internal references,
 * and as such they could be shared among multiple drivers using the same
 * transport, so they cannot be safely destroyed until the whole SCMI stack
 * is removed (short of adding the further burden of refcounting).
 */
void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
{
	struct list_head *phead;

	pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
		 id_table->name, id_table->protocol_id);

	mutex_lock(&scmi_requested_devices_mtx);
	phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
	if (phead) {
		struct scmi_requested_dev *victim, *tmp;

		list_for_each_entry_safe(victim, tmp, phead, node) {
			if (!strcmp(victim->id_table->name, id_table->name)) {
				list_del(&victim->node);
				kfree(victim);
				break;
			}
		}

		if (list_empty(phead)) {
			idr_remove(&scmi_requested_devices,
				   id_table->protocol_id);
			kfree(phead);
		}
	}
	mutex_unlock(&scmi_requested_devices_mtx);
}

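/**
 * scmi_cleanup_txrx_channels() - Release all the Tx/Rx channels
 *
 * @info: The SCMI instance descriptor
 *
 * Walk the Tx and Rx IDRs invoking the transport-specific .chan_free() on
 * each registered channel and then destroy the IDRs themselves.
 *
 * Return: 0 on Success
 */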
static int scmi_cleanup_txrx_channels(struct scmi_info *info)
{
	int ret;
	struct idr *idr = &info->tx_idr;

	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
	idr_destroy(&info->tx_idr);

	idr = &info->rx_idr;
	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
	idr_destroy(&info->rx_idr);

	return ret;
}

static int scmi_probe(struct platform_device *pdev)
{
	int ret;
	struct scmi_handle *handle;
	const struct scmi_desc *desc;
	struct scmi_info *info;
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	INIT_LIST_HEAD(&info->node);
	idr_init(&info->protocols);
	mutex_init(&info->protocols_mtx);
	idr_init(&info->active_protocols);

	platform_set_drvdata(pdev, info);
	idr_init(&info->tx_idr);
	idr_init(&info->rx_idr);

	handle = &info->handle;
	handle->dev = info->dev;
	handle->version = &info->version;
	handle->devm_protocol_get = scmi_devm_protocol_get;
	handle->devm_protocol_put = scmi_devm_protocol_put;

	/* System wide atomic threshold for atomic ops, if any */
	if (!of_property_read_u32(np, "atomic-threshold-us",
				  &info->atomic_threshold))
		dev_info(dev,
			 "SCMI System wide atomic threshold set to %d us\n",
			 info->atomic_threshold);
	handle->is_transport_atomic = scmi_is_transport_atomic;

	if (desc->ops->link_supplier) {
		ret = desc->ops->link_supplier(dev);
		if (ret)
			return ret;
	}

	ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
	if (ret)
		return ret;

	ret = scmi_xfer_info_init(info);
	if (ret)
		goto clear_txrx_setup;

	if (scmi_notification_init(handle))
		dev_err(dev, "SCMI Notifications NOT available.\n");

	if (info->desc->atomic_enabled && !is_transport_polling_capable(info))
		dev_err(dev,
			"Transport is not polling capable. Atomic mode not supported.\n");

	/*
	 * Trigger SCMI Base protocol initialization.
	 * It's mandatory and won't be ever released/deinit until the
	 * SCMI stack is shutdown/unloaded as a whole.
	 */
	ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
	if (ret) {
		dev_err(dev, "unable to communicate with SCMI\n");
		goto notification_exit;
	}

	mutex_lock(&scmi_list_mutex);
	list_add_tail(&info->node, &scmi_list);
	mutex_unlock(&scmi_list_mutex);

	for_each_available_child_of_node(np, child) {
		u32 prot_id;

		if (of_property_read_u32(child, "reg", &prot_id))
			continue;

		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
			dev_err(dev, "Out of range protocol %d\n", prot_id);

		if (!scmi_is_protocol_implemented(handle, prot_id)) {
			dev_err(dev, "SCMI protocol %d not implemented\n",
				prot_id);
			continue;
		}

		/*
		 * Save this valid DT protocol descriptor amongst
		 * @active_protocols for this SCMI instance.
		 */
		ret = idr_alloc(&info->active_protocols, child,
				prot_id, prot_id + 1, GFP_KERNEL);
		if (ret != prot_id) {
			dev_err(dev, "SCMI protocol %d already activated. Skip\n",
				prot_id);
			continue;
		}

		of_node_get(child);
		scmi_create_protocol_devices(child, info, prot_id);
	}

	return 0;

notification_exit:
	scmi_notification_exit(&info->handle);
clear_txrx_setup:
	scmi_cleanup_txrx_channels(info);
	return ret;
}

void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
{
	idr_remove(idr, id);
}

static int scmi_remove(struct platform_device *pdev)
{
	int ret = 0, id;
	struct scmi_info *info = platform_get_drvdata(pdev);
	struct device_node *child;

	mutex_lock(&scmi_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&scmi_list_mutex);

	if (ret)
		return ret;

	scmi_notification_exit(&info->handle);

	mutex_lock(&info->protocols_mtx);
	idr_destroy(&info->protocols);
	mutex_unlock(&info->protocols_mtx);

	idr_for_each_entry(&info->active_protocols, child, id)
		of_node_put(child);
	idr_destroy(&info->active_protocols);

	/* Safe to free channels since no more users */
	return scmi_cleanup_txrx_channels(info);
}

static ssize_t protocol_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%u.%u\n", info->version.major_ver,
		       info->version.minor_ver);
}
static DEVICE_ATTR_RO(protocol_version);

static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "0x%x\n", info->version.impl_ver);
}
static DEVICE_ATTR_RO(firmware_version);

static ssize_t vendor_id_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.vendor_id);
}
static DEVICE_ATTR_RO(vendor_id);

static ssize_t sub_vendor_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
}
static DEVICE_ATTR_RO(sub_vendor_id);

static struct attribute *versions_attrs[] = {
	&dev_attr_firmware_version.attr,
	&dev_attr_protocol_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_sub_vendor_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(versions);

/* Each compatible listed below must have a descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
	{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
#endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
	{ .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc },
#endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
	{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
#endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
	{ .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc},
#endif
	{ /* Sentinel */ },
};

MODULE_DEVICE_TABLE(of, scmi_of_match);
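
/*
 * Example (illustrative) devicetree snippet matching the mailbox transport
 * compatible above; each child node exposes one protocol via its 'reg',
 * which is the protocol id parsed in scmi_probe(). The mailbox and shmem
 * phandles below are placeholders.
 *
 *	firmware {
 *		scmi {
 *			compatible = "arm,scmi";
 *			mboxes = <&mailbox 0>, <&mailbox 1>;
 *			shmem = <&cpu_scp_lpri>, <&cpu_scp_hpri>;
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *
 *			scmi_clk: protocol@14 {
 *				reg = <0x14>;
 *				#clock-cells = <1>;
 *			};
 *		};
 *	};
 */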

static struct platform_driver scmi_driver = {
	.driver = {
		   .name = "arm-scmi",
		   .of_match_table = scmi_of_match,
		   .dev_groups = versions_groups,
		   },
	.probe = scmi_probe,
	.remove = scmi_remove,
};

/**
 * __scmi_transports_setup - Common helper to call transport-specific
 * .init/.exit code if provided.
 *
 * @init: A flag to distinguish between init and exit.
 *
 * Note that, if provided, we invoke .init/.exit functions for all the
 * transports currently compiled in.
 *
 * Return: 0 on Success.
 */
static inline int __scmi_transports_setup(bool init)
{
	int ret = 0;
	const struct of_device_id *trans;

	for (trans = scmi_of_match; trans->data; trans++) {
		const struct scmi_desc *tdesc = trans->data;

		if ((init && !tdesc->transport_init) ||
		    (!init && !tdesc->transport_exit))
			continue;

		if (init)
			ret = tdesc->transport_init();
		else
			tdesc->transport_exit();

		if (ret) {
			pr_err("SCMI transport %s FAILED initialization!\n",
			       trans->compatible);
			break;
		}
	}

	return ret;
}

static int __init scmi_transports_init(void)
{
	return __scmi_transports_setup(true);
}

static void __exit scmi_transports_exit(void)
{
	__scmi_transports_setup(false);
}

static int __init scmi_driver_init(void)
{
	int ret;

	/* Bail out if no SCMI transport was configured */
	if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
		return -EINVAL;

	scmi_bus_init();

	/* Initialize any compiled-in transport which provided an init/exit */
	ret = scmi_transports_init();
	if (ret)
		return ret;

	scmi_base_register();

	scmi_clock_register();
	scmi_perf_register();
	scmi_power_register();
	scmi_reset_register();
	scmi_sensors_register();
	scmi_voltage_register();
	scmi_system_register();

	return platform_driver_register(&scmi_driver);
}
subsys_initcall(scmi_driver_init);

static void __exit scmi_driver_exit(void)
{
	scmi_base_unregister();

	scmi_clock_unregister();
	scmi_perf_unregister();
	scmi_power_unregister();
	scmi_reset_unregister();
	scmi_sensors_unregister();
	scmi_voltage_unregister();
	scmi_system_unregister();

	scmi_bus_exit();

	scmi_transports_exit();

	platform_driver_unregister(&scmi_driver);
}
module_exit(scmi_driver_exit);

MODULE_ALIAS("platform:arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");