// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor (SCP)
 * and the Application Processors (AP). The Message Handling Unit (MHU)
 * provides a mechanism for inter-processor communication between the SCP's
 * Cortex-M3 and the AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 */

#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include "common.h"
#include "notify.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>

enum scmi_error_codes {
        SCMI_SUCCESS = 0,       /* Success */
        SCMI_ERR_SUPPORT = -1,  /* Not supported */
        SCMI_ERR_PARAMS = -2,   /* Invalid Parameters */
        SCMI_ERR_ACCESS = -3,   /* Invalid access/permission denied */
        SCMI_ERR_ENTRY = -4,    /* Not found */
        SCMI_ERR_RANGE = -5,    /* Value out of range */
        SCMI_ERR_BUSY = -6,     /* Device busy */
        SCMI_ERR_COMMS = -7,    /* Communication Error */
        SCMI_ERR_GENERIC = -8,  /* Generic Error */
        SCMI_ERR_HARDWARE = -9, /* Hardware Error */
        SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
};

/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;

static DEFINE_IDR(scmi_requested_devices);
static DEFINE_MUTEX(scmi_requested_devices_mtx);

/* Track globally the creation of SCMI SystemPower related devices */
static bool scmi_syspower_registered;
/* Protect access to scmi_syspower_registered */
static DEFINE_MUTEX(scmi_syspower_mtx);

struct scmi_requested_dev {
        const struct scmi_device_id *id_table;
        struct list_head node;
};
/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *      Index of this bitmap table is also used for message
 *      sequence identifier.
 * @xfer_lock: Protection for message allocation
 * @max_msg: Maximum number of messages that can be pending
 * @free_xfers: A free list of xfers available for use. It is initialized with
 *      a number of xfers equal to the maximum allowed in-flight
 *      messages.
 * @pending_xfers: A hashtable, indexed by msg_hdr.seq, used to keep all the
 *      currently in-flight messages.
 */
struct scmi_xfers_info {
        unsigned long *xfer_alloc_table;
        spinlock_t xfer_lock;
        int max_msg;
        struct hlist_head free_xfers;
        DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ);
};

/**
 * struct scmi_protocol_instance - Describe an initialized protocol instance.
 * @handle: Reference to the SCMI handle associated to this protocol instance.
 * @proto: A reference to the protocol descriptor.
 * @gid: A reference for per-protocol devres management.
 * @users: A refcount to track effective users of this protocol.
 * @priv: Reference for optional protocol private data.
 * @ph: An embedded protocol handle that will be passed down to protocol
 *      initialization code to identify this instance.
 *
 * Each protocol is initialized independently once for each SCMI platform in
 * which it is defined by DT and implemented by the SCMI server fw.
 */
struct scmi_protocol_instance {
        const struct scmi_handle *handle;
        const struct scmi_protocol *proto;
        void *gid;
        refcount_t users;
        void *priv;
        struct scmi_protocol_handle ph;
};

#define ph_to_pi(h)     container_of(h, struct scmi_protocol_instance, ph)

/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *      implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @rx_minfo: Universal Receive Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols: IDR for protocols' instance descriptors initialized for
 *      this SCMI instance: populated on protocol's first attempted
 *      usage.
 * @protocols_mtx: A mutex to protect protocols instances initialization.
 * @protocols_imp: List of protocols implemented, currently maximum of
 *      scmi_revision_info.num_protocols elements allocated by the
 *      base protocol
 * @active_protocols: IDR storing device_nodes for protocols actually defined
 *      in the DT and confirmed as implemented by fw.
 * @atomic_threshold: Optional system wide DT-configured threshold, expressed
 *      in microseconds, for atomic operations.
 *      Only SCMI synchronous commands reported by the platform
 *      to have an execution latency less than or equal to the
 *      threshold should be considered for atomic mode operation:
 *      such a decision is finally left up to the SCMI drivers.
 * @notify_priv: Pointer to private data structure specific to notifications.
 * @node: List head
 * @users: Number of users of this instance
 */
struct scmi_info {
        struct device *dev;
        const struct scmi_desc *desc;
        struct scmi_revision_info version;
        struct scmi_handle handle;
        struct scmi_xfers_info tx_minfo;
        struct scmi_xfers_info rx_minfo;
        struct idr tx_idr;
        struct idr rx_idr;
        struct idr protocols;
        /* Ensure mutually exclusive access to protocols instance array */
        struct mutex protocols_mtx;
        u8 *protocols_imp;
        struct idr active_protocols;
        unsigned int atomic_threshold;
        void *notify_priv;
        struct list_head node;
        int users;
};

#define handle_to_scmi_info(h)  container_of(h, struct scmi_info, handle)

static const int scmi_linux_errmap[] = {
        /* better than switch case as long as return value is continuous */
        0,                      /* SCMI_SUCCESS */
        -EOPNOTSUPP,            /* SCMI_ERR_SUPPORT */
        -EINVAL,                /* SCMI_ERR_PARAMS */
        -EACCES,                /* SCMI_ERR_ACCESS */
        -ENOENT,                /* SCMI_ERR_ENTRY */
        -ERANGE,                /* SCMI_ERR_RANGE */
        -EBUSY,                 /* SCMI_ERR_BUSY */
        -ECOMM,                 /* SCMI_ERR_COMMS */
        -EIO,                   /* SCMI_ERR_GENERIC */
        -EREMOTEIO,             /* SCMI_ERR_HARDWARE */
        -EPROTO,                /* SCMI_ERR_PROTOCOL */
};

static inline int scmi_to_linux_errno(int errno)
{
        int err_idx = -errno;

        if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
                return scmi_linux_errmap[err_idx];
        return -EIO;
}
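
/*
 * Usage sketch (illustrative only, not additional driver code): protocol
 * code checks xfer->hdr.status after a transfer completes and converts it
 * to a Linux errno, exactly as do_xfer() does further below:
 *
 *      ret = do_xfer(ph, t);
 *      if (!ret && t->hdr.status)
 *              ret = scmi_to_linux_errno(t->hdr.status);
 *
 * Any SCMI status outside the mapped range collapses to -EIO.
 */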

void scmi_notification_instance_data_set(const struct scmi_handle *handle,
                                         void *priv)
{
        struct scmi_info *info = handle_to_scmi_info(handle);

        info->notify_priv = priv;
        /* Ensure updated protocol private data is visible */
        smp_wmb();
}

void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
{
        struct scmi_info *info = handle_to_scmi_info(handle);

        /* Ensure protocols_private_data has been updated */
        smp_rmb();
        return info->notify_priv;
}
/**
 * scmi_xfer_token_set - Reserve and set new token for the xfer at hand
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: The xfer to act upon
 *
 * Pick the next unused monotonically increasing token and set it into
 * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
 * reuse of freshly completed or timed-out xfers, thus mitigating the risk
 * of incorrect association of a late and expired xfer with a live in-flight
 * transaction, both happening to re-use the same token identifier.
 *
 * Since the platform is NOT required to answer our request in-order we should
 * account for a few rare but possible scenarios:
 *
 * - exactly 'next_token' may be NOT available so pick xfer_id >= next_token
 *   using find_next_zero_bit() starting from candidate next_token bit
 *
 * - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are used in-flight but
 *   there are plenty of free tokens at the start, so try a second pass using
 *   find_next_zero_bit() and starting from 0.
 *
 * X = used in-flight
 *
 * Normal
 * ------
 *
 *              |- xfer_id picked
 *   -----------+----------------------------------------------------------
 *   | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
 *   ----------------------------------------------------------------------
 *              ^
 *              |- next_token
 *
 * Out-of-order pending at start
 * -----------------------------
 *
 *        |- xfer_id picked, last_token fixed
 *   -----+----------------------------------------------------------------
 *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
 *   ----------------------------------------------------------------------
 *    ^
 *    |- next_token
 *
 * Out-of-order pending at end
 * ---------------------------
 *
 *        |- xfer_id picked, last_token fixed
 *   -----+----------------------------------------------------------------
 *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
 *   ----------------------------------------------------------------------
 *                                                              ^
 *                                                              |- next_token
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 *
 * Return: 0 on Success or error
 */
static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
                               struct scmi_xfer *xfer)
{
        unsigned long xfer_id, next_token;

        /*
         * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
         * using the pre-allocated transfer_id as a base.
         * Note that the global transfer_id is shared across all message types
         * so there could be holes in the allocated set of monotonic sequence
         * numbers, but that is going to limit the effectiveness of the
         * mitigation only in very rare limit conditions.
         */
        next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));

        /* Pick the next available xfer_id >= next_token */
        xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
                                     MSG_TOKEN_MAX, next_token);
        if (xfer_id == MSG_TOKEN_MAX) {
                /*
                 * After heavily out-of-order responses, there are no free
                 * tokens ahead, but only at start of xfer_alloc_table so
                 * try again from the beginning.
                 */
                xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
                                             MSG_TOKEN_MAX, 0);
                /*
                 * Something is wrong if we got here since there can be a
                 * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
                 * but we have not found any free token [0, MSG_TOKEN_MAX - 1].
                 */
                if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
                        return -ENOMEM;
        }

        /* Update +/- last_token accordingly if we skipped some hole */
        if (xfer_id != next_token)
                atomic_add((int)(xfer_id - next_token), &transfer_last_id);

        /* Set in-flight */
        set_bit(xfer_id, minfo->xfer_alloc_table);
        xfer->hdr.seq = (u16)xfer_id;

        return 0;
}
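
/*
 * The two-pass wraparound search above can be seen in isolation in this
 * small sketch (illustrative only, not driver code): given a bitmap of
 * 'slots' bits, prefer a free bit at or after 'hint', then wrap around to
 * bit 0 before giving up.
 *
 *      static int pick_slot(unsigned long *bitmap, unsigned int slots,
 *                           unsigned int hint)
 *      {
 *              unsigned long id;
 *
 *              id = find_next_zero_bit(bitmap, slots, hint);
 *              if (id == slots)
 *                      id = find_next_zero_bit(bitmap, slots, 0);
 *              if (id == slots)
 *                      return -ENOMEM;
 *              set_bit(id, bitmap);
 *              return id;
 *      }
 */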

/**
 * scmi_xfer_token_clear - Release the token
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: The xfer to act upon
 */
static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
                                         struct scmi_xfer *xfer)
{
        clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
}

/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @set_pending: If true a monotonic token is picked and the xfer is added to
 *               the pending hash table.
 *
 * Helper function which is used by various message functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * Picks an xfer from the free list @free_xfers (if any available) and, if
 * required, sets a monotonically increasing token and stores the inflight
 * xfer into the @pending_xfers hashtable for later retrieval.
 *
 * The successfully initialized xfer is refcounted.
 *
 * Context: Holds @xfer_lock while manipulating @xfer_alloc_table and
 *          @free_xfers.
 *
 * Return: A valid xfer on success, or an error pointer otherwise.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
                                       struct scmi_xfers_info *minfo,
                                       bool set_pending)
{
        int ret;
        unsigned long flags;
        struct scmi_xfer *xfer;

        spin_lock_irqsave(&minfo->xfer_lock, flags);
        if (hlist_empty(&minfo->free_xfers)) {
                spin_unlock_irqrestore(&minfo->xfer_lock, flags);
                return ERR_PTR(-ENOMEM);
        }

        /* grab an xfer from the free_list */
        xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
        hlist_del_init(&xfer->node);

        /*
         * Allocate transfer_id early so that it can be used also as base for
         * monotonic sequence number generation if needed.
         */
        xfer->transfer_id = atomic_inc_return(&transfer_last_id);

        if (set_pending) {
                /* Pick and set monotonic token */
                ret = scmi_xfer_token_set(minfo, xfer);
                if (!ret) {
                        hash_add(minfo->pending_xfers, &xfer->node,
                                 xfer->hdr.seq);
                        xfer->pending = true;
                } else {
                        dev_err(handle->dev,
                                "Failed to get monotonic token %d\n", ret);
                        hlist_add_head(&xfer->node, &minfo->free_xfers);
                        xfer = ERR_PTR(ret);
                }
        }

        if (!IS_ERR(xfer)) {
                refcount_set(&xfer->users, 1);
                atomic_set(&xfer->busy, SCMI_XFER_FREE);
        }
        spin_unlock_irqrestore(&minfo->xfer_lock, flags);

        return xfer;
}

/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * After refcount check, possibly release an xfer, clearing the token slot,
 * removing xfer from @pending_xfers and putting it back into free_xfers.
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
        unsigned long flags;

        spin_lock_irqsave(&minfo->xfer_lock, flags);
        if (refcount_dec_and_test(&xfer->users)) {
                if (xfer->pending) {
                        scmi_xfer_token_clear(minfo, xfer);
                        hash_del(&xfer->node);
                        xfer->pending = false;
                }
                hlist_add_head(&xfer->node, &minfo->free_xfers);
        }
        spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}

/**
 * scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer_id: Token ID to lookup in @pending_xfers
 *
 * Refcounting is untouched.
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 *
 * Return: A valid xfer on Success or error otherwise
 */
static struct scmi_xfer *
scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
{
        struct scmi_xfer *xfer = NULL;

        if (test_bit(xfer_id, minfo->xfer_alloc_table))
                xfer = XFER_FIND(minfo->pending_xfers, xfer_id);

        return xfer ?: ERR_PTR(-EINVAL);
}

/**
 * scmi_msg_response_validate - Validate message type against state of related
 * xfer
 *
 * @cinfo: A reference to the channel descriptor.
 * @msg_type: Message type to check
 * @xfer: A reference to the xfer to validate against @msg_type
 *
 * This function checks if @msg_type is congruent with the current state of
 * a pending @xfer; if an asynchronous delayed response is received before the
 * related synchronous response (Out-of-Order Delayed Response) the missing
 * synchronous response is assumed to be OK and completed, carrying on with the
 * Delayed Response: this is done to address the case in which the underlying
 * SCMI transport can deliver such out-of-order responses.
 *
 * Context: Assumes to be called with xfer->lock already acquired.
 *
 * Return: 0 on Success, error otherwise
 */
static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo,
                                             u8 msg_type,
                                             struct scmi_xfer *xfer)
{
        /*
         * Even if a response was indeed expected on this slot at this point,
         * a buggy platform could wrongly reply feeding us an unexpected
         * delayed response we're not prepared to handle: bail-out safely
         * blaming firmware.
         */
        if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
                dev_err(cinfo->dev,
                        "Delayed Response for %d not expected! Buggy F/W ?\n",
                        xfer->hdr.seq);
                return -EINVAL;
        }

        switch (xfer->state) {
        case SCMI_XFER_SENT_OK:
                if (msg_type == MSG_TYPE_DELAYED_RESP) {
                        /*
                         * Delayed Response expected but delivered earlier.
                         * Assume message RESPONSE was OK and skip state.
                         */
                        xfer->hdr.status = SCMI_SUCCESS;
                        xfer->state = SCMI_XFER_RESP_OK;
                        complete(&xfer->done);
                        dev_warn(cinfo->dev,
                                 "Received valid OoO Delayed Response for %d\n",
                                 xfer->hdr.seq);
                }
                break;
        case SCMI_XFER_RESP_OK:
                if (msg_type != MSG_TYPE_DELAYED_RESP)
                        return -EINVAL;
                break;
        case SCMI_XFER_DRESP_OK:
                /* No further message expected once in SCMI_XFER_DRESP_OK */
                return -EINVAL;
        }

        return 0;
}

/**
 * scmi_xfer_state_update - Update xfer state
 *
 * @xfer: A reference to the xfer to update
 * @msg_type: Type of message being processed.
 *
 * Note that this message is assumed to have been already successfully
 * validated by @scmi_msg_response_validate(), so here we just update the
 * state.
 *
 * Context: Assumes to be called on an xfer exclusively acquired using the
 *          busy flag.
 */
static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
{
        xfer->hdr.type = msg_type;

        /* Unknown command types were already discarded earlier */
        if (xfer->hdr.type == MSG_TYPE_COMMAND)
                xfer->state = SCMI_XFER_RESP_OK;
        else
                xfer->state = SCMI_XFER_DRESP_OK;
}

static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
{
        int ret;

        ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);

        return ret == SCMI_XFER_FREE;
}

/**
 * scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer
 *
 * @cinfo: A reference to the channel descriptor.
 * @msg_hdr: A message header to use as lookup key
 *
 * When a valid xfer is found for the sequence number embedded in the provided
 * msg_hdr, reference counting is properly updated and exclusive access to this
 * xfer is granted till released with @scmi_xfer_command_release.
 *
 * Return: A valid @xfer on Success or error otherwise.
 */
static inline struct scmi_xfer *
scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
        int ret;
        unsigned long flags;
        struct scmi_xfer *xfer;
        struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
        struct scmi_xfers_info *minfo = &info->tx_minfo;
        u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
        u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);

        /* Are we even expecting this? */
        spin_lock_irqsave(&minfo->xfer_lock, flags);
        xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
        if (IS_ERR(xfer)) {
                dev_err(cinfo->dev,
                        "Message for %d type %d is not expected!\n",
                        xfer_id, msg_type);
                spin_unlock_irqrestore(&minfo->xfer_lock, flags);
                return xfer;
        }
        refcount_inc(&xfer->users);
        spin_unlock_irqrestore(&minfo->xfer_lock, flags);

        spin_lock_irqsave(&xfer->lock, flags);
        ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
        /*
         * If a pending xfer was found which was also in a congruent state with
         * the received message, acquire exclusive access to it setting the busy
         * flag.
         * Spins only on the rare limit condition of concurrent reception of
         * RESP and DRESP for the same xfer.
         */
        if (!ret) {
                spin_until_cond(scmi_xfer_acquired(xfer));
                scmi_xfer_state_update(xfer, msg_type);
        }
        spin_unlock_irqrestore(&xfer->lock, flags);

        if (ret) {
                dev_err(cinfo->dev,
                        "Invalid message type:%d for %d - HDR:0x%X state:%d\n",
                        msg_type, xfer_id, msg_hdr, xfer->state);
                /* On error the refcount incremented above has to be dropped */
                __scmi_xfer_put(minfo, xfer);
                xfer = ERR_PTR(-EINVAL);
        }

        return xfer;
}

static inline void scmi_xfer_command_release(struct scmi_info *info,
                                             struct scmi_xfer *xfer)
{
        atomic_set(&xfer->busy, SCMI_XFER_FREE);
        __scmi_xfer_put(&info->tx_minfo, xfer);
}

static inline void scmi_clear_channel(struct scmi_info *info,
                                      struct scmi_chan_info *cinfo)
{
        if (info->desc->ops->clear_channel)
                info->desc->ops->clear_channel(cinfo);
}

static inline bool is_polling_required(struct scmi_chan_info *cinfo,
                                       struct scmi_info *info)
{
        return cinfo->no_completion_irq || info->desc->force_polling;
}

static inline bool is_transport_polling_capable(struct scmi_info *info)
{
        return info->desc->ops->poll_done ||
               info->desc->sync_cmds_completed_on_ret;
}

static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
                                      struct scmi_info *info)
{
        return is_polling_required(cinfo, info) &&
               is_transport_polling_capable(info);
}

static void scmi_handle_notification(struct scmi_chan_info *cinfo,
                                     u32 msg_hdr, void *priv)
{
        struct scmi_xfer *xfer;
        struct device *dev = cinfo->dev;
        struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
        struct scmi_xfers_info *minfo = &info->rx_minfo;
        ktime_t ts;

        ts = ktime_get_boottime();
        xfer = scmi_xfer_get(cinfo->handle, minfo, false);
        if (IS_ERR(xfer)) {
                dev_err(dev, "failed to get free message slot (%ld)\n",
                        PTR_ERR(xfer));
                scmi_clear_channel(info, cinfo);
                return;
        }

        unpack_scmi_header(msg_hdr, &xfer->hdr);
        if (priv)
                /* Ensure order between xfer->priv store and following ops */
                smp_store_mb(xfer->priv, priv);
        info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
                                            xfer);

        trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "NOTI",
                            xfer->hdr.seq, xfer->hdr.status,
                            xfer->rx.buf, xfer->rx.len);

        scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
                    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);

        trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
                           xfer->hdr.protocol_id, xfer->hdr.seq,
                           MSG_TYPE_NOTIFICATION);

        __scmi_xfer_put(minfo, xfer);

        scmi_clear_channel(info, cinfo);
}

static void scmi_handle_response(struct scmi_chan_info *cinfo,
                                 u32 msg_hdr, void *priv)
{
        struct scmi_xfer *xfer;
        struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

        xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
        if (IS_ERR(xfer)) {
                if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
                        scmi_clear_channel(info, cinfo);
                return;
        }

        /* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
        if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
                xfer->rx.len = info->desc->max_msg_size;

        if (priv)
                /* Ensure order between xfer->priv store and following ops */
                smp_store_mb(xfer->priv, priv);
        info->desc->ops->fetch_response(cinfo, xfer);

        trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id,
                            xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
                            "DLYD" : "RESP",
                            xfer->hdr.seq, xfer->hdr.status,
                            xfer->rx.buf, xfer->rx.len);

        trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
                           xfer->hdr.protocol_id, xfer->hdr.seq,
                           xfer->hdr.type);

        if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
                scmi_clear_channel(info, cinfo);
                complete(xfer->async_done);
        } else {
                complete(&xfer->done);
        }

        scmi_xfer_command_release(info, xfer);
}

/**
 * scmi_rx_callback() - callback for receiving messages
 *
 * @cinfo: SCMI channel info
 * @msg_hdr: Message header
 * @priv: Transport specific private data.
 *
 * Processes one received message, mapping it to the appropriate transfer
 * information and signalling completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv)
{
        u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);

        switch (msg_type) {
        case MSG_TYPE_NOTIFICATION:
                scmi_handle_notification(cinfo, msg_hdr, priv);
                break;
        case MSG_TYPE_COMMAND:
        case MSG_TYPE_DELAYED_RESP:
                scmi_handle_response(cinfo, msg_hdr, priv);
                break;
        default:
                WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
                break;
        }
}
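
/*
 * A sketch of how a transport typically feeds this dispatcher (illustrative
 * only: 'foo_chan' and foo_read_msg_header() are hypothetical, not part of
 * this driver). The transport reads the raw header from its mailbox/shared
 * memory in its completion IRQ and hands it to scmi_rx_callback():
 *
 *      static irqreturn_t foo_transport_irq(int irq, void *data)
 *      {
 *              struct foo_chan *fchan = data;
 *              u32 msg_hdr = foo_read_msg_header(fchan);
 *
 *              scmi_rx_callback(fchan->cinfo, msg_hdr, NULL);
 *              return IRQ_HANDLED;
 *      }
 */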

/**
 * xfer_put() - Release a transmit message
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: message that was reserved by xfer_get_init
 */
static void xfer_put(const struct scmi_protocol_handle *ph,
                     struct scmi_xfer *xfer)
{
        const struct scmi_protocol_instance *pi = ph_to_pi(ph);
        struct scmi_info *info = handle_to_scmi_info(pi->handle);

        __scmi_xfer_put(&info->tx_minfo, xfer);
}

static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
                                      struct scmi_xfer *xfer, ktime_t stop)
{
        struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

        /*
         * Poll also on xfer->done so that polling can be forcibly terminated
         * in case of out-of-order receptions of delayed responses
         */
        return info->desc->ops->poll_done(cinfo, xfer) ||
               try_wait_for_completion(&xfer->done) ||
               ktime_after(ktime_get(), stop);
}

/**
 * scmi_wait_for_message_response - A helper to group all the possible ways of
 * waiting for a synchronous message response.
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being waited for.
 *
 * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
 * configuration flags like xfer->hdr.poll_completion.
 *
 * Return: 0 on Success, error otherwise.
 */
static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
                                          struct scmi_xfer *xfer)
{
        struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
        struct device *dev = info->dev;
        int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms;

        trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
                                      xfer->hdr.protocol_id, xfer->hdr.seq,
                                      timeout_ms,
                                      xfer->hdr.poll_completion);

        if (xfer->hdr.poll_completion) {
                /*
                 * Real polling is needed only if transport has NOT declared
                 * itself to support synchronous commands replies.
                 */
                if (!info->desc->sync_cmds_completed_on_ret) {
                        /*
                         * Poll on xfer using transport provided .poll_done();
                         * assumes no completion interrupt was available.
                         */
                        ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);

                        spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
                                                                  xfer, stop));
                        if (ktime_after(ktime_get(), stop)) {
                                dev_err(dev,
                                        "timed out in resp(caller: %pS) - polling\n",
                                        (void *)_RET_IP_);
                                ret = -ETIMEDOUT;
                        }
                }

                if (!ret) {
                        unsigned long flags;

                        /*
                         * Do not fetch_response if an out-of-order delayed
                         * response is being processed.
                         */
                        spin_lock_irqsave(&xfer->lock, flags);
                        if (xfer->state == SCMI_XFER_SENT_OK) {
                                info->desc->ops->fetch_response(cinfo, xfer);
                                xfer->state = SCMI_XFER_RESP_OK;
                        }
                        spin_unlock_irqrestore(&xfer->lock, flags);

                        /* Trace polled replies. */
                        trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id,
                                            "RESP",
                                            xfer->hdr.seq, xfer->hdr.status,
                                            xfer->rx.buf, xfer->rx.len);
                }
        } else {
                /* And we wait for the response. */
                if (!wait_for_completion_timeout(&xfer->done,
                                                 msecs_to_jiffies(timeout_ms))) {
                        dev_err(dev, "timed out in resp(caller: %pS)\n",
                                (void *)_RET_IP_);
                        ret = -ETIMEDOUT;
                }
        }

        return ret;
}

/**
 * do_xfer() - Do one transfer
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, the corresponding error in
 *         case of transmit error, else 0 if all goes well.
 */
static int do_xfer(const struct scmi_protocol_handle *ph,
                   struct scmi_xfer *xfer)
{
        int ret;
        const struct scmi_protocol_instance *pi = ph_to_pi(ph);
        struct scmi_info *info = handle_to_scmi_info(pi->handle);
        struct device *dev = info->dev;
        struct scmi_chan_info *cinfo;

        /* Check for polling request on custom command xfers at first */
        if (xfer->hdr.poll_completion && !is_transport_polling_capable(info)) {
                dev_warn_once(dev,
                              "Polling mode is not supported by transport.\n");
                return -EINVAL;
        }

        cinfo = idr_find(&info->tx_idr, pi->proto->id);
        if (unlikely(!cinfo))
                return -EINVAL;

        /* True ONLY if also supported by transport. */
        if (is_polling_enabled(cinfo, info))
                xfer->hdr.poll_completion = true;

        /*
         * Initialise protocol id now from protocol handle to avoid it being
         * overridden by mistake (or malice) by the protocol code mangling with
         * the scmi_xfer structure prior to this.
         */
        xfer->hdr.protocol_id = pi->proto->id;
        reinit_completion(&xfer->done);

        trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
                              xfer->hdr.protocol_id, xfer->hdr.seq,
                              xfer->hdr.poll_completion);

        xfer->state = SCMI_XFER_SENT_OK;
        /*
         * Even though spinlocking is not needed here since no race is possible
         * on xfer->state due to the monotonically increasing tokens allocation,
         * we must anyway ensure xfer->state initialization is not re-ordered
         * after the .send_message() to be sure that on the RX path an early
         * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
         */
        smp_mb();

        ret = info->desc->ops->send_message(cinfo, xfer);
        if (ret < 0) {
                dev_dbg(dev, "Failed to send message %d\n", ret);
                return ret;
        }

        trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "CMND",
                            xfer->hdr.seq, xfer->hdr.status,
                            xfer->tx.buf, xfer->tx.len);

        ret = scmi_wait_for_message_response(cinfo, xfer);
        if (!ret && xfer->hdr.status)
                ret = scmi_to_linux_errno(xfer->hdr.status);

        if (info->desc->ops->mark_txdone)
                info->desc->ops->mark_txdone(cinfo, ret, xfer);

        trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
                            xfer->hdr.protocol_id, xfer->hdr.seq, ret);

        return ret;
}

static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
                              struct scmi_xfer *xfer)
{
        const struct scmi_protocol_instance *pi = ph_to_pi(ph);
        struct scmi_info *info = handle_to_scmi_info(pi->handle);

        xfer->rx.len = info->desc->max_msg_size;
}

#define SCMI_MAX_RESPONSE_TIMEOUT       (2 * MSEC_PER_SEC)

/**
 * do_xfer_with_response() - Do one transfer and wait until the delayed
 *      response is received
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Using asynchronous commands in atomic/polling mode should be avoided since
 * it could cause long busy-waiting here, so ignore polling for the delayed
 * response and WARN if it was requested for this command transaction since
 * upper layers should refrain from issuing such kind of requests.
 *
 * The only other option would have been to refrain from using any asynchronous
 * command even if made available, when an atomic transport is detected, and
 * instead forcibly use the synchronous version (thing that can be easily
 * attained at the protocol layer), but this would also have led to longer
 * stalls of the channel for synchronous commands and possibly timeouts.
 * (in other words there is usually a good reason if a platform provides an
 * asynchronous version of a command and we should prefer to use it...just not
 * when using atomic/polling mode)
 *
 * Return: -ETIMEDOUT in case of no delayed response, the corresponding error
 *         in case of transmit error, else 0 if all goes well.
 */
static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
                                 struct scmi_xfer *xfer)
{
        int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
        DECLARE_COMPLETION_ONSTACK(async_response);

        xfer->async_done = &async_response;

        /*
         * Delayed responses should not be polled, so an async command should
         * not have been used when requiring an atomic/poll context; WARN and
         * perform instead a sleeping wait.
         * (Note Async + IgnoreDelayedResponses are sent via do_xfer)
         */
        WARN_ON_ONCE(xfer->hdr.poll_completion);

        ret = do_xfer(ph, xfer);
        if (!ret) {
                if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
                        dev_err(ph->dev,
                                "timed out in delayed resp(caller: %pS)\n",
                                (void *)_RET_IP_);
                        ret = -ETIMEDOUT;
                } else if (xfer->hdr.status) {
                        ret = scmi_to_linux_errno(xfer->hdr.status);
                }
        }

        xfer->async_done = NULL;
        return ret;
}
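
/*
 * Usage sketch for the asynchronous flavour (illustrative only: the command
 * name and parsing helper are hypothetical). A protocol issues an async
 * command through its protocol handle and sleeps until the delayed response
 * arrives:
 *
 *      ret = ph->xops->xfer_get_init(ph, FOO_ASYNC_CMD, sizeof(*msg), 0, &t);
 *      if (ret)
 *              return ret;
 *
 *      ret = ph->xops->do_xfer_with_response(ph, t);
 *      if (!ret)
 *              foo_parse_delayed_resp(t->rx.buf);
 *
 *      ph->xops->xfer_put(ph, t);
 */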

/**
 * xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @ph: Pointer to SCMI protocol handle
 * @msg_id: Message identifier
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *         corresponding error.
 */
static int xfer_get_init(const struct scmi_protocol_handle *ph,
                         u8 msg_id, size_t tx_size, size_t rx_size,
                         struct scmi_xfer **p)
{
        int ret;
        struct scmi_xfer *xfer;
        const struct scmi_protocol_instance *pi = ph_to_pi(ph);
        struct scmi_info *info = handle_to_scmi_info(pi->handle);
        struct scmi_xfers_info *minfo = &info->tx_minfo;
        struct device *dev = info->dev;

        /* Ensure we have sane transfer sizes */
        if (rx_size > info->desc->max_msg_size ||
            tx_size > info->desc->max_msg_size)
                return -ERANGE;

        xfer = scmi_xfer_get(pi->handle, minfo, true);
        if (IS_ERR(xfer)) {
                ret = PTR_ERR(xfer);
                dev_err(dev, "failed to get free message slot(%d)\n", ret);
                return ret;
        }

        xfer->tx.len = tx_size;
        xfer->rx.len = rx_size ? : info->desc->max_msg_size;
        xfer->hdr.type = MSG_TYPE_COMMAND;
        xfer->hdr.id = msg_id;
        xfer->hdr.poll_completion = false;

        *p = xfer;

        return 0;
}

/**
 * version_get() - command to get the revision of the SCMI entity
 *
 * @ph: Pointer to SCMI protocol handle
 * @version: Holds returned version of protocol.
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
{
        int ret;
        __le32 *rev_info;
        struct scmi_xfer *t;

        ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
        if (ret)
                return ret;

        ret = do_xfer(ph, t);
        if (!ret) {
                rev_info = t->rx.buf;
                *version = le32_to_cpu(*rev_info);
        }

        xfer_put(ph, t);
        return ret;
}

/**
 * scmi_set_protocol_priv - Set protocol specific data at init time
 *
 * @ph: A reference to the protocol handle.
 * @priv: The private data to set.
 *
 * Return: 0 on Success
 */
static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
                                  void *priv)
{
        struct scmi_protocol_instance *pi = ph_to_pi(ph);

        pi->priv = priv;

        return 0;
}

/**
 * scmi_get_protocol_priv - Get protocol specific data set at init time
 *
 * @ph: A reference to the protocol handle.
 *
 * Return: Protocol private data if any was set.
 */
static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
{
        const struct scmi_protocol_instance *pi = ph_to_pi(ph);

        return pi->priv;
}

static const struct scmi_xfer_ops xfer_ops = {
        .version_get = version_get,
        .xfer_get_init = xfer_get_init,
        .reset_rx_to_maxsz = reset_rx_to_maxsz,
        .do_xfer = do_xfer,
        .do_xfer_with_response = do_xfer_with_response,
        .xfer_put = xfer_put,
};

struct scmi_msg_resp_domain_name_get {
        __le32 flags;
        u8 name[SCMI_MAX_STR_SIZE];
};

/**
 * scmi_common_extended_name_get - Common helper to get extended resources name
 * @ph: A protocol handle reference.
 * @cmd_id: The specific command ID to use.
 * @res_id: The specific resource ID to use.
 * @name: A pointer to the preallocated area where the retrieved name will be
 *        stored as a NULL terminated string.
 * @len: The len in bytes of the @name char array.
 *
 * Return: 0 on Success
 */
static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph,
                                         u8 cmd_id, u32 res_id, char *name,
                                         size_t len)
{
        int ret;
        struct scmi_xfer *t;
        struct scmi_msg_resp_domain_name_get *resp;

        ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(res_id),
                                      sizeof(*resp), &t);
        if (ret)
                goto out;

        put_unaligned_le32(res_id, t->tx.buf);
        resp = t->rx.buf;

        ret = ph->xops->do_xfer(ph, t);
        if (!ret)
                strscpy(name, resp->name, len);

        ph->xops->xfer_put(ph, t);
out:
        if (ret)
                dev_warn(ph->dev,
                         "Failed to get extended name - id:%u (ret:%d). Using %s\n",
                         res_id, ret, name);
        return ret;
}
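
/*
 * Protocols reach this helper through the hops table (sketch only; the
 * command identifier and destination buffer below are hypothetical):
 *
 *      ph->hops->extended_name_get(ph, FOO_DOMAIN_NAME_GET, domain,
 *                                  dom_info->name, SCMI_MAX_STR_SIZE);
 */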

/**
 * struct scmi_iterator - Iterator descriptor
 * @msg: A reference to the message TX buffer; filled by @prepare_message with
 *       a proper custom command payload for each multi-part command request.
 * @resp: A reference to the response RX buffer; used by @update_state and
 *        @process_response to parse the multi-part replies.
 * @t: A reference to the underlying xfer initialized and used transparently by
 *     the iterator internal routines.
 * @ph: A reference to the associated protocol handle to be used.
 * @ops: A reference to the custom provided iterator operations.
 * @state: The current iterator state; used and updated in turn by the
 *         iterator's internal routines and by the caller-provided
 *         @scmi_iterator_ops.
 * @priv: A reference to optional private data as provided by the caller and
 *        passed back to the @scmi_iterator_ops.
 */
struct scmi_iterator {
        void *msg;
        void *resp;
        struct scmi_xfer *t;
        const struct scmi_protocol_handle *ph;
        struct scmi_iterator_ops *ops;
        struct scmi_iterator_state state;
        void *priv;
};

static void *scmi_iterator_init(const struct scmi_protocol_handle *ph,
                                struct scmi_iterator_ops *ops,
                                unsigned int max_resources, u8 msg_id,
                                size_t tx_size, void *priv)
{
        int ret;
        struct scmi_iterator *i;

        i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL);
        if (!i)
                return ERR_PTR(-ENOMEM);

        i->ph = ph;
        i->ops = ops;
        i->priv = priv;

        ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t);
        if (ret) {
                devm_kfree(ph->dev, i);
                return ERR_PTR(ret);
        }

        i->state.max_resources = max_resources;
        i->msg = i->t->tx.buf;
        i->resp = i->t->rx.buf;

        return i;
}

static int scmi_iterator_run(void *iter)
{
        int ret = -EINVAL;
        struct scmi_iterator_ops *iops;
        const struct scmi_protocol_handle *ph;
        struct scmi_iterator_state *st;
        struct scmi_iterator *i = iter;

        if (!i || !i->ops || !i->ph)
                return ret;

        iops = i->ops;
        ph = i->ph;
        st = &i->state;

        do {
                iops->prepare_message(i->msg, st->desc_index, i->priv);
                ret = ph->xops->do_xfer(ph, i->t);
                if (ret)
                        break;

                st->rx_len = i->t->rx.len;
                ret = iops->update_state(st, i->resp, i->priv);
                if (ret)
                        break;

                if (st->num_returned > st->max_resources - st->desc_index) {
                        dev_err(ph->dev,
                                "No. of resources can't exceed %d\n",
                                st->max_resources);
                        ret = -EINVAL;
                        break;
                }

                for (st->loop_idx = 0; st->loop_idx < st->num_returned;
                     st->loop_idx++) {
                        ret = iops->process_response(ph, i->resp, st, i->priv);
                        if (ret)
                                goto out;
                }

                st->desc_index += st->num_returned;
                ph->xops->reset_rx_to_maxsz(ph, i->t);
                /*
                 * check for both returned and remaining to avoid infinite
                 * loop due to buggy firmware
                 */
        } while (st->num_returned && st->num_remaining);

out:
        /* Finalize and destroy iterator */
        ph->xops->xfer_put(ph, i->t);
        devm_kfree(ph->dev, i);

        return ret;
}
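
/*
 * A sketch of the intended calling pattern (illustrative only: the callback
 * and message names are hypothetical). A protocol provides the three
 * scmi_iterator_ops callbacks and then runs the iterator to walk a
 * multi-part reply:
 *
 *      struct scmi_iterator_ops ops = {
 *              .prepare_message = foo_prepare_message,
 *              .update_state = foo_update_state,
 *              .process_response = foo_process_response,
 *      };
 *      void *iter;
 *
 *      iter = ph->hops->iter_response_init(ph, &ops, num_domains,
 *                                          FOO_DESCRIBE_LEVELS,
 *                                          sizeof(struct foo_msg), priv);
 *      if (IS_ERR(iter))
 *              return PTR_ERR(iter);
 *
 *      return ph->hops->iter_response_run(iter);
 */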

struct scmi_msg_get_fc_info {
        __le32 domain;
        __le32 message_id;
};

struct scmi_msg_resp_desc_fc {
        __le32 attr;
#define SUPPORTS_DOORBELL(x)            ((x) & BIT(0))
#define DOORBELL_REG_WIDTH(x)           FIELD_GET(GENMASK(2, 1), (x))
        __le32 rate_limit;
        __le32 chan_addr_low;
        __le32 chan_addr_high;
        __le32 chan_size;
        __le32 db_addr_low;
        __le32 db_addr_high;
        __le32 db_set_lmask;
        __le32 db_set_hmask;
        __le32 db_preserve_lmask;
        __le32 db_preserve_hmask;
};

static void
scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
                             u8 describe_id, u32 message_id, u32 valid_size,
                             u32 domain, void __iomem **p_addr,
                             struct scmi_fc_db_info **p_db)
{
        int ret;
        u32 flags;
        u64 phys_addr;
        u8 size;
        void __iomem *addr;
        struct scmi_xfer *t;
        struct scmi_fc_db_info *db = NULL;
        struct scmi_msg_get_fc_info *info;
        struct scmi_msg_resp_desc_fc *resp;
        const struct scmi_protocol_instance *pi = ph_to_pi(ph);

        if (!p_addr) {
                ret = -EINVAL;
                goto err_out;
        }

        ret = ph->xops->xfer_get_init(ph, describe_id,
                                      sizeof(*info), sizeof(*resp), &t);
        if (ret)
                goto err_out;

        info = t->tx.buf;
        info->domain = cpu_to_le32(domain);
        info->message_id = cpu_to_le32(message_id);

        /*
         * Bail out on error leaving fc_info addresses zeroed; this includes
         * the case in which the requested domain/message_id does NOT support
         * fastchannels at all.
         */
        ret = ph->xops->do_xfer(ph, t);
        if (ret)
                goto err_xfer;

        resp = t->rx.buf;
        flags = le32_to_cpu(resp->attr);
        size = le32_to_cpu(resp->chan_size);
        if (size != valid_size) {
                ret = -EINVAL;
                goto err_xfer;
        }

        phys_addr = le32_to_cpu(resp->chan_addr_low);
        phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
        addr = devm_ioremap(ph->dev, phys_addr, size);
        if (!addr) {
                ret = -EADDRNOTAVAIL;
                goto err_xfer;
        }

        *p_addr = addr;

        if (p_db && SUPPORTS_DOORBELL(flags)) {
                db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
                if (!db) {
                        ret = -ENOMEM;
                        goto err_db;
                }

                size = 1 << DOORBELL_REG_WIDTH(flags);
                phys_addr = le32_to_cpu(resp->db_addr_low);
                phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
                addr = devm_ioremap(ph->dev, phys_addr, size);
                if (!addr) {
                        ret = -EADDRNOTAVAIL;
                        goto err_db_mem;
                }

                db->addr = addr;
                db->width = size;
                db->set = le32_to_cpu(resp->db_set_lmask);
                db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
                db->mask = le32_to_cpu(resp->db_preserve_lmask);
                db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;

                *p_db = db;
        }

        ph->xops->xfer_put(ph, t);

        dev_dbg(ph->dev,
                "Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n",
                pi->proto->id, message_id, domain);

        return;

err_db_mem:
        devm_kfree(ph->dev, db);

err_db:
        *p_addr = NULL;

err_xfer:
        ph->xops->xfer_put(ph, t);

err_out:
        dev_warn(ph->dev,
                 "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n",
                 pi->proto->id, message_id, domain, ret);
}

#define SCMI_PROTO_FC_RING_DB(w)                                \
do {                                                            \
        u##w val = 0;                                           \
                                                                \
        if (db->mask)                                           \
                val = ioread##w(db->addr) & db->mask;           \
        iowrite##w((u##w)db->set | val, db->addr);              \
} while (0)

static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
{
        if (!db || !db->addr)
                return;

        if (db->width == 1)
                SCMI_PROTO_FC_RING_DB(8);
        else if (db->width == 2)
                SCMI_PROTO_FC_RING_DB(16);
        else if (db->width == 4)
                SCMI_PROTO_FC_RING_DB(32);
        else /* db->width == 8 */
#ifdef CONFIG_64BIT
                SCMI_PROTO_FC_RING_DB(64);
#else
        {
                u64 val = 0;

                if (db->mask)
                        val = ioread64_hi_lo(db->addr) & db->mask;
                iowrite64_hi_lo(db->set | val, db->addr);
        }
#endif
}
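
/*
 * Typical use from a protocol's fastchannel path (sketch only; the fc array
 * layout shown is hypothetical): write the new value into the mapped channel
 * and then ring the doorbell, if one was described by the platform:
 *
 *      iowrite32(level, fc[domain].set_addr);
 *      ph->hops->fastchannel_db_ring(fc[domain].set_db);
 *
 * fastchannel_db_ring() is a no-op when no doorbell was set up, so callers
 * need not check for its presence.
 */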

static const struct scmi_proto_helpers_ops helpers_ops = {
        .extended_name_get = scmi_common_extended_name_get,
        .iter_response_init = scmi_iterator_init,
        .iter_response_run = scmi_iterator_run,
        .fastchannel_init = scmi_common_fastchannel_init,
        .fastchannel_db_ring = scmi_common_fastchannel_db_ring,
};

/**
 * scmi_revision_area_get - Retrieve version memory area.
 *
 * @ph: A reference to the protocol handle.
 *
 * A helper to grab the version memory area reference during SCMI Base protocol
 * initialization.
 *
 * Return: A reference to the version memory area associated to the SCMI
 *         instance underlying this protocol handle.
 */
struct scmi_revision_info *
scmi_revision_area_get(const struct scmi_protocol_handle *ph)
{
        const struct scmi_protocol_instance *pi = ph_to_pi(ph);

        return pi->handle->version;
}

/**
 * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
 * instance descriptor.
 * @info: The reference to the related SCMI instance.
 * @proto: The protocol descriptor.
 *
 * Allocate a new protocol instance descriptor, using the provided @proto
 * description, against the specified SCMI instance @info, and initialize it;
 * all resources management is handled via a dedicated per-protocol devres
 * group.
 *
 * Context: Assumes to be called with @protocols_mtx already acquired.
 * Return: A reference to a freshly allocated and initialized protocol instance
 *         or ERR_PTR on failure. On failure the @proto reference is at first
 *         put using @scmi_protocol_put() before releasing all the devres group.
 */
static struct scmi_protocol_instance *
scmi_alloc_init_protocol_instance(struct scmi_info *info,
                                  const struct scmi_protocol *proto)
{
        int ret = -ENOMEM;
        void *gid;
        struct scmi_protocol_instance *pi;
        const struct scmi_handle *handle = &info->handle;

        /* Protocol specific devres group */
        gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
        if (!gid) {
                scmi_protocol_put(proto->id);
                goto out;
        }

        pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
        if (!pi)
                goto clean;

        pi->gid = gid;
        pi->proto = proto;
        pi->handle = handle;
        pi->ph.dev = handle->dev;
        pi->ph.xops = &xfer_ops;
        pi->ph.hops = &helpers_ops;
        pi->ph.set_priv = scmi_set_protocol_priv;
        pi->ph.get_priv = scmi_get_protocol_priv;
        refcount_set(&pi->users, 1);
        /* proto->init is assured NON NULL by scmi_protocol_register */
        ret = pi->proto->instance_init(&pi->ph);
        if (ret)
                goto clean;

        ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
                        GFP_KERNEL);
        if (ret != proto->id)
                goto clean;

        /*
         * Warn but ignore events registration errors since we do not want
         * to skip whole protocols if their notifications are messed up.
         */
        if (pi->proto->events) {
                ret = scmi_register_protocol_events(handle, pi->proto->id,
                                                    &pi->ph,
                                                    pi->proto->events);
                if (ret)
                        dev_warn(handle->dev,
                                 "Protocol:%X - Events Registration Failed - err:%d\n",
                                 pi->proto->id, ret);
        }

        devres_close_group(handle->dev, pi->gid);
        dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);

        return pi;

clean:
        /* Take care to put the protocol module's owner before releasing all */
        scmi_protocol_put(proto->id);
        devres_release_group(handle->dev, gid);
out:
        return ERR_PTR(ret);
}

/**
 * scmi_get_protocol_instance - Protocol initialization helper.
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * In case the required protocol has never been requested before for this
 * instance, allocate and initialize all the needed structures while handling
 * resource allocation with a dedicated per-protocol devres subgroup.
 *
 * Return: A reference to an initialized protocol instance or error on failure:
 *         in particular returns -EPROBE_DEFER when the desired protocol could
 *         NOT be found.
 */
static struct scmi_protocol_instance * __must_check
scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
{
        struct scmi_protocol_instance *pi;
        struct scmi_info *info = handle_to_scmi_info(handle);

        mutex_lock(&info->protocols_mtx);
        pi = idr_find(&info->protocols, protocol_id);

        if (pi) {
                refcount_inc(&pi->users);
        } else {
                const struct scmi_protocol *proto;

                /* Fails if protocol not registered on bus */
                proto = scmi_protocol_get(protocol_id);
                if (proto)
                        pi = scmi_alloc_init_protocol_instance(info, proto);
                else
                        pi = ERR_PTR(-EPROBE_DEFER);
        }
        mutex_unlock(&info->protocols_mtx);

        return pi;
}

/**
 * scmi_protocol_acquire - Protocol acquire
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * Register a new user for the requested protocol on the specified SCMI
 * platform instance, possibly triggering its initialization on first user.
 *
 * Return: 0 if protocol was acquired successfully.
 */
int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
{
        return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
}

/**
 * scmi_protocol_release - Protocol de-initialization helper.
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * Remove one user for the specified protocol and trigger de-initialization
 * and resources de-allocation once the last user has gone.
 */
void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
{
        struct scmi_info *info = handle_to_scmi_info(handle);
        struct scmi_protocol_instance *pi;

        mutex_lock(&info->protocols_mtx);
        pi = idr_find(&info->protocols, protocol_id);
        if (WARN_ON(!pi))
                goto out;

        if (refcount_dec_and_test(&pi->users)) {
                void *gid = pi->gid;

                if (pi->proto->events)
                        scmi_deregister_protocol_events(handle, protocol_id);

                if (pi->proto->instance_deinit)
                        pi->proto->instance_deinit(&pi->ph);

                idr_remove(&info->protocols, protocol_id);

                scmi_protocol_put(protocol_id);

                devres_release_group(handle->dev, gid);
                dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
                        protocol_id);
        }

out:
        mutex_unlock(&info->protocols_mtx);
}

void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
                                     u8 *prot_imp)
{
        const struct scmi_protocol_instance *pi = ph_to_pi(ph);
        struct scmi_info *info = handle_to_scmi_info(pi->handle);

        info->protocols_imp = prot_imp;
}

static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
        int i;
        struct scmi_info *info = handle_to_scmi_info(handle);
        struct scmi_revision_info *rev = handle->version;

        if (!info->protocols_imp)
                return false;

        for (i = 0; i < rev->num_protocols; i++)
                if (info->protocols_imp[i] == prot_id)
                        return true;
        return false;
}
1678
1679 struct scmi_protocol_devres {
1680 const struct scmi_handle *handle;
1681 u8 protocol_id;
1682 };
1683
1684 static void scmi_devm_release_protocol(struct device *dev, void *res)
1685 {
1686 struct scmi_protocol_devres *dres = res;
1687
1688 scmi_protocol_release(dres->handle, dres->protocol_id);
1689 }
1690
1691 static struct scmi_protocol_instance __must_check *
1692 scmi_devres_protocol_instance_get(struct scmi_device *sdev, u8 protocol_id)
1693 {
1694 struct scmi_protocol_instance *pi;
1695 struct scmi_protocol_devres *dres;
1696
1697 dres = devres_alloc(scmi_devm_release_protocol,
1698 sizeof(*dres), GFP_KERNEL);
1699 if (!dres)
1700 return ERR_PTR(-ENOMEM);
1701
1702 pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
1703 if (IS_ERR(pi)) {
1704 devres_free(dres);
1705 return pi;
1706 }
1707
1708 dres->handle = sdev->handle;
1709 dres->protocol_id = protocol_id;
1710 devres_add(&sdev->dev, dres);
1711
1712 return pi;
1713 }
1714
1715 /**
1716 * scmi_devm_protocol_get - Devres managed get protocol operations and handle
1717 * @sdev: A reference to an scmi_device whose embedded struct device is to
1718 * be used for devres accounting.
1719 * @protocol_id: The protocol being requested.
1720 * @ph: A pointer reference used to pass back the associated protocol handle.
1721 *
1722 * Get hold of a protocol accounting for its usage, possibly triggering its
1723 * initialization, and returning the protocol specific operations and the
1724 * related protocol handle which will be used as first argument in most of
1725 * the protocol operations methods.
1726 * Being a devres based managed method, the protocol hold will be
1727 * automatically released, and the protocol possibly de-initialized on last
1728 * user, once the SCMI driver owning the scmi_device is unbound from it.
1729 *
1730 * Return: A reference to the requested protocol operations or error.
1731 * Must be checked for errors by caller.
1732 */
1733 static const void __must_check *
1734 scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
1735 struct scmi_protocol_handle **ph)
1736 {
1737 struct scmi_protocol_instance *pi;
1738
1739 if (!ph)
1740 return ERR_PTR(-EINVAL);
1741
1742 pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
1743 if (IS_ERR(pi))
1744 return pi;
1745
1746 *ph = &pi->ph;
1747
1748 return pi->proto->ops;
1749 }
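/*
 * Example (illustrative sketch only, not part of this driver): a typical
 * SCMI client driver probe grabbing protocol operations via the devres
 * managed accessor published through the handle; the Clock protocol and
 * its ops type are used here only as an assumed example. Compiled out on
 * purpose.
 */
#if 0
static int example_scmi_clk_probe(struct scmi_device *sdev)
{
	const struct scmi_clk_proto_ops *clk_ops;
	struct scmi_protocol_handle *ph;

	clk_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK,
						  &ph);
	if (IS_ERR(clk_ops))
		return PTR_ERR(clk_ops);

	/* Protocol ops can now be used, passing @ph as their first argument */
	return 0;
}
#endif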
1750
1751 /**
1752 * scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol
1753 * @sdev: A reference to an scmi_device whose embedded struct device is to
1754 * be used for devres accounting.
1755 * @protocol_id: The protocol being requested.
1756 *
1757 * Get hold of a protocol accounting for its usage, possibly triggering its
1758 * initialization but without getting access to its protocol specific operations
1759 * and handle.
1760 *
1761 * Being a devres based managed method, the protocol hold will be
1762 * automatically released, and the protocol possibly de-initialized on last
1763 * user, once the SCMI driver owning the scmi_device is unbound from it.
1764 *
1765 * Return: 0 on SUCCESS
1766 */
1767 static int __must_check scmi_devm_protocol_acquire(struct scmi_device *sdev,
1768 u8 protocol_id)
1769 {
1770 struct scmi_protocol_instance *pi;
1771
1772 pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
1773 if (IS_ERR(pi))
1774 return PTR_ERR(pi);
1775
1776 return 0;
1777 }
1778
1779 static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
1780 {
1781 struct scmi_protocol_devres *dres = res;
1782
1783 if (WARN_ON(!dres || !data))
1784 return 0;
1785
1786 return dres->protocol_id == *((u8 *)data);
1787 }
1788
1789 /**
1790 * scmi_devm_protocol_put - Devres managed put protocol operations and handle
1791 * @sdev: A reference to an scmi_device whose embedded struct device is to
1792 * be used for devres accounting.
1793 * @protocol_id: The protocol being requested.
1794 *
1795 * Explicitly release a protocol hold previously obtained by calling the
1796 * above @scmi_devm_protocol_get.
1797 */
1798 static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
1799 {
1800 int ret;
1801
1802 ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
1803 scmi_devm_protocol_match, &protocol_id);
1804 WARN_ON(ret);
1805 }
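/*
 * Example (illustrative sketch only, not part of this driver): holding a
 * protocol without accessing its operations via devm_protocol_acquire(),
 * then dropping the hold early with devm_protocol_put() instead of
 * waiting for devres cleanup at unbind time; the protocol ID is an
 * assumption for the example. Compiled out on purpose.
 */
#if 0
static int example_hold_and_drop(struct scmi_device *sdev)
{
	int ret;

	ret = sdev->handle->devm_protocol_acquire(sdev, SCMI_PROTOCOL_POWER);
	if (ret)
		return ret;

	/* ... the protocol stays initialized while the hold is in place ... */

	/* Explicit release, balancing the acquire above */
	sdev->handle->devm_protocol_put(sdev, SCMI_PROTOCOL_POWER);

	return 0;
}
#endif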
1806
1807 /**
1808 * scmi_is_transport_atomic - Method to check if underlying transport for an
1809 * SCMI instance is configured as atomic.
1810 *
1811 * @handle: A reference to the SCMI platform instance.
1812 * @atomic_threshold: An optional return value for the system-wide currently
1813 * configured threshold for atomic operations.
1814 *
1815 * Return: True if transport is configured as atomic
1816 */
1817 static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
1818 unsigned int *atomic_threshold)
1819 {
1820 bool ret;
1821 struct scmi_info *info = handle_to_scmi_info(handle);
1822
1823 ret = info->desc->atomic_enabled && is_transport_polling_capable(info);
1824 if (ret && atomic_threshold)
1825 *atomic_threshold = info->atomic_threshold;
1826
1827 return ret;
1828 }
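/*
 * Example (illustrative sketch only, not part of this driver): a client
 * querying transport atomicity through the handle method installed at
 * probe time, to choose between sleeping and atomic completion paths.
 * Compiled out on purpose.
 */
#if 0
static bool example_can_use_atomic(const struct scmi_handle *handle)
{
	unsigned int threshold_us = 0;

	/* Threshold reporting is optional: a NULL pointer is accepted too */
	return handle->is_transport_atomic(handle, &threshold_us);
}
#endif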
1829
1830 static inline
1831 struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
1832 {
1833 info->users++;
1834 return &info->handle;
1835 }
1836
1837 /**
1838 * scmi_handle_get() - Get the SCMI handle for a device
1839 *
1840 * @dev: pointer to device for which we want SCMI handle
1841 *
1842 * NOTE: The function does not track individual clients of the framework
1843 * and is expected to be maintained by the caller of the SCMI protocol
1844 * library. scmi_handle_put must be balanced with a successful scmi_handle_get.
1845 *
1846 * Return: pointer to handle if successful, NULL on error
1847 */
1848 struct scmi_handle *scmi_handle_get(struct device *dev)
1849 {
1850 struct list_head *p;
1851 struct scmi_info *info;
1852 struct scmi_handle *handle = NULL;
1853
1854 mutex_lock(&scmi_list_mutex);
1855 list_for_each(p, &scmi_list) {
1856 info = list_entry(p, struct scmi_info, node);
1857 if (dev->parent == info->dev) {
1858 handle = scmi_handle_get_from_info_unlocked(info);
1859 break;
1860 }
1861 }
1862 mutex_unlock(&scmi_list_mutex);
1863
1864 return handle;
1865 }
1866
1867 /**
1868 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
1869 *
1870 * @handle: handle acquired by scmi_handle_get
1871 *
1872 * NOTE: The function does not track individual clients of the framework
1873 * and is expected to be maintained by the caller of the SCMI protocol
1874 * library. scmi_handle_put must be balanced with a successful scmi_handle_get.
1875 *
1876 * Return: 0 if successfully released,
1877 * -EINVAL if a NULL handle was passed.
1878 */
1879 int scmi_handle_put(const struct scmi_handle *handle)
1880 {
1881 struct scmi_info *info;
1882
1883 if (!handle)
1884 return -EINVAL;
1885
1886 info = handle_to_scmi_info(handle);
1887 mutex_lock(&scmi_list_mutex);
1888 if (!WARN_ON(!info->users))
1889 info->users--;
1890 mutex_unlock(&scmi_list_mutex);
1891
1892 return 0;
1893 }
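/*
 * Example (illustrative sketch only, not part of this driver): balanced
 * scmi_handle_get()/scmi_handle_put() usage by code managing a child
 * device of an SCMI instance; the -EPROBE_DEFER fallback is just an
 * assumed policy for the example. Compiled out on purpose.
 */
#if 0
static int example_handle_user(struct device *dev)
{
	struct scmi_handle *handle;

	/* @dev must be a child of the SCMI instance device to match */
	handle = scmi_handle_get(dev);
	if (!handle)
		return -EPROBE_DEFER;

	/* ... use @handle ... */

	return scmi_handle_put(handle);
}
#endif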
1894
1895 static int __scmi_xfer_info_init(struct scmi_info *sinfo,
1896 struct scmi_xfers_info *info)
1897 {
1898 int i;
1899 struct scmi_xfer *xfer;
1900 struct device *dev = sinfo->dev;
1901 const struct scmi_desc *desc = sinfo->desc;
1902
1903 /* Pre-allocated messages, no more than what hdr.seq can support */
1904 if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
1905 dev_err(dev,
1906 "Invalid maximum messages %d, not in range [1 - %lu]\n",
1907 info->max_msg, MSG_TOKEN_MAX);
1908 return -EINVAL;
1909 }
1910
1911 hash_init(info->pending_xfers);
1912
1913 /* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
1914 info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(MSG_TOKEN_MAX),
1915 sizeof(long), GFP_KERNEL);
1916 if (!info->xfer_alloc_table)
1917 return -ENOMEM;
1918
1919 /*
1920 * Preallocate a number of xfers equal to max inflight messages,
1921 * pre-initialize the buffer pointer to pre-allocated buffers and
1922 * attach all of them to the free list
1923 */
1924 INIT_HLIST_HEAD(&info->free_xfers);
1925 for (i = 0; i < info->max_msg; i++) {
1926 xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
1927 if (!xfer)
1928 return -ENOMEM;
1929
1930 xfer->rx.buf = devm_kcalloc(dev, desc->max_msg_size, sizeof(u8),
1931 GFP_KERNEL);
1932 if (!xfer->rx.buf)
1933 return -ENOMEM;
1934
1935 xfer->tx.buf = xfer->rx.buf;
1936 init_completion(&xfer->done);
1937 spin_lock_init(&xfer->lock);
1938
1939 /* Add initialized xfer to the free list */
1940 hlist_add_head(&xfer->node, &info->free_xfers);
1941 }
1942
1943 spin_lock_init(&info->xfer_lock);
1944
1945 return 0;
1946 }
1947
1948 static int scmi_channels_max_msg_configure(struct scmi_info *sinfo)
1949 {
1950 const struct scmi_desc *desc = sinfo->desc;
1951
1952 if (!desc->ops->get_max_msg) {
1953 sinfo->tx_minfo.max_msg = desc->max_msg;
1954 sinfo->rx_minfo.max_msg = desc->max_msg;
1955 } else {
1956 struct scmi_chan_info *base_cinfo;
1957
1958 base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
1959 if (!base_cinfo)
1960 return -EINVAL;
1961 sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);
1962
1963 /* RX channel is optional so can be skipped */
1964 base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
1965 if (base_cinfo)
1966 sinfo->rx_minfo.max_msg =
1967 desc->ops->get_max_msg(base_cinfo);
1968 }
1969
1970 return 0;
1971 }
1972
1973 static int scmi_xfer_info_init(struct scmi_info *sinfo)
1974 {
1975 int ret;
1976
1977 ret = scmi_channels_max_msg_configure(sinfo);
1978 if (ret)
1979 return ret;
1980
1981 ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
1982 if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
1983 ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
1984
1985 return ret;
1986 }
1987
1988 static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
1989 int prot_id, bool tx)
1990 {
1991 int ret, idx;
1992 struct scmi_chan_info *cinfo;
1993 struct idr *idr;
1994
1995 /* Transmit channel is first entry i.e. index 0 */
1996 idx = tx ? 0 : 1;
1997 idr = tx ? &info->tx_idr : &info->rx_idr;
1998
1999 /* check if already allocated, used for multiple devices per protocol */
2000 cinfo = idr_find(idr, prot_id);
2001 if (cinfo)
2002 return 0;
2003
2004 if (!info->desc->ops->chan_available(dev, idx)) {
2005 cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
2006 if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
2007 return -EINVAL;
2008 goto idr_alloc;
2009 }
2010
2011 cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
2012 if (!cinfo)
2013 return -ENOMEM;
2014
2015 cinfo->dev = dev;
2016 cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
2017
2018 ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
2019 if (ret)
2020 return ret;
2021
2022 if (tx && is_polling_required(cinfo, info)) {
2023 if (is_transport_polling_capable(info))
2024 dev_info(dev,
2025 "Enabled polling mode TX channel - prot_id:%d\n",
2026 prot_id);
2027 else
2028 dev_warn(dev,
2029 "Polling mode NOT supported by transport.\n");
2030 }
2031
2032 idr_alloc:
2033 ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
2034 if (ret != prot_id) {
2035 dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
2036 return ret;
2037 }
2038
2039 cinfo->handle = &info->handle;
2040 return 0;
2041 }
2042
2043 static inline int
2044 scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
2045 {
2046 int ret = scmi_chan_setup(info, dev, prot_id, true);
2047
2048 if (!ret) {
2049 /* Rx is optional, report only memory errors */
2050 ret = scmi_chan_setup(info, dev, prot_id, false);
2051 if (ret && ret != -ENOMEM)
2052 ret = 0;
2053 }
2054
2055 return ret;
2056 }
2057
2058 /**
2059 * scmi_get_protocol_device - Helper to get/create an SCMI device.
2060 *
2061 * @np: A device node representing a valid active protocol for the referred
2062 * SCMI instance.
2063 * @info: The referred SCMI instance for which we are getting/creating this
2064 * device.
2065 * @prot_id: The protocol ID.
2066 * @name: The device name.
2067 *
2068 * Referring to the specific SCMI instance identified by @info, this helper
2069 * takes care to return a properly initialized device matching the requested
2070 * @prot_id and @name: if the device does not exist yet, it is created as a
2071 * child of the specified SCMI instance @info and its transport properly
2072 * initialized as usual.
2073 *
2074 * Return: A properly initialized scmi device, NULL otherwise.
2075 */
2076 static inline struct scmi_device *
2077 scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
2078 int prot_id, const char *name)
2079 {
2080 struct scmi_device *sdev;
2081
2082 /* Already created for this parent SCMI instance ? */
2083 sdev = scmi_child_dev_find(info->dev, prot_id, name);
2084 if (sdev)
2085 return sdev;
2086
2087 mutex_lock(&scmi_syspower_mtx);
2088 if (prot_id == SCMI_PROTOCOL_SYSTEM && scmi_syspower_registered) {
2089 dev_warn(info->dev,
2090 "SCMI SystemPower protocol device must be unique !\n");
2091 mutex_unlock(&scmi_syspower_mtx);
2092
2093 return NULL;
2094 }
2095
2096 pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);
2097
2098 sdev = scmi_device_create(np, info->dev, prot_id, name);
2099 if (!sdev) {
2100 dev_err(info->dev, "failed to create %d protocol device\n",
2101 prot_id);
2102 mutex_unlock(&scmi_syspower_mtx);
2103
2104 return NULL;
2105 }
2106
2107 if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
2108 dev_err(&sdev->dev, "failed to setup transport\n");
2109 scmi_device_destroy(sdev);
2110 mutex_unlock(&scmi_syspower_mtx);
2111
2112 return NULL;
2113 }
2114
2115 if (prot_id == SCMI_PROTOCOL_SYSTEM)
2116 scmi_syspower_registered = true;
2117
2118 mutex_unlock(&scmi_syspower_mtx);
2119
2120 return sdev;
2121 }
2122
2123 static inline void
2124 scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
2125 int prot_id, const char *name)
2126 {
2127 struct scmi_device *sdev;
2128
2129 sdev = scmi_get_protocol_device(np, info, prot_id, name);
2130 if (!sdev)
2131 return;
2132
2133 /* setup handle now as the transport is ready */
2134 scmi_set_handle(sdev);
2135 }
2136
2137 /**
2138 * scmi_create_protocol_devices - Create devices for all pending requests for
2139 * this SCMI instance.
2140 *
2141 * @np: The device node describing the protocol
2142 * @info: The SCMI instance descriptor
2143 * @prot_id: The protocol ID
2144 *
2145 * All devices previously requested for this instance (if any) are found and
2146 * created by scanning the proper &scmi_requested_devices entry.
2147 */
2148 static void scmi_create_protocol_devices(struct device_node *np,
2149 struct scmi_info *info, int prot_id)
2150 {
2151 struct list_head *phead;
2152
2153 mutex_lock(&scmi_requested_devices_mtx);
2154 phead = idr_find(&scmi_requested_devices, prot_id);
2155 if (phead) {
2156 struct scmi_requested_dev *rdev;
2157
2158 list_for_each_entry(rdev, phead, node)
2159 scmi_create_protocol_device(np, info, prot_id,
2160 rdev->id_table->name);
2161 }
2162 mutex_unlock(&scmi_requested_devices_mtx);
2163 }
2164
2165 /**
2166 * scmi_protocol_device_request - Helper to request a device
2167 *
2168 * @id_table: A protocol/name pair descriptor for the device to be created.
2169 *
2170 * This helper lets an SCMI driver request specific devices identified by the
2171 * @id_table to be created for each active SCMI instance.
2172 *
2173 * The requested device name MUST NOT already exist for any protocol;
2174 * at first the freshly requested @id_table is annotated in the IDR table
2175 * @scmi_requested_devices, then a matching device is created for each
2176 * already active SCMI instance (if any).
2177 *
2178 * This way the requested device is created straight away for all the already
2179 * initialized (probed) SCMI instances (handles) and it also remains annotated
2180 * as pending creation if the requesting SCMI driver was loaded before some
2181 * SCMI instance and related transports were available: when such a late
2182 * instance is probed, its probe will take care to scan the list of pending
2183 * requested devices and create those on its own (see
2184 * @scmi_create_protocol_devices and its enclosing loop).
2185 *
2186 * Return: 0 on Success
2187 */
2188 int scmi_protocol_device_request(const struct scmi_device_id *id_table)
2189 {
2190 int ret = 0;
2191 unsigned int id = 0;
2192 struct list_head *head, *phead = NULL;
2193 struct scmi_requested_dev *rdev;
2194 struct scmi_info *info;
2195
2196 pr_debug("Requesting SCMI device (%s) for protocol %x\n",
2197 id_table->name, id_table->protocol_id);
2198
2199 /*
2200 * Search for the matching protocol rdev list and then search for any
2201 * existing device with the same name; fail if any duplicate is found.
2202 */
2203 mutex_lock(&scmi_requested_devices_mtx);
2204 idr_for_each_entry(&scmi_requested_devices, head, id) {
2205 if (!phead) {
2206 /* A list found registered in the IDR is never empty */
2207 rdev = list_first_entry(head, struct scmi_requested_dev,
2208 node);
2209 if (rdev->id_table->protocol_id ==
2210 id_table->protocol_id)
2211 phead = head;
2212 }
2213 list_for_each_entry(rdev, head, node) {
2214 if (!strcmp(rdev->id_table->name, id_table->name)) {
2215 pr_err("Ignoring duplicate request [%d] %s\n",
2216 rdev->id_table->protocol_id,
2217 rdev->id_table->name);
2218 ret = -EINVAL;
2219 goto out;
2220 }
2221 }
2222 }
2223
2224 /*
2225 * No duplicate found for requested id_table, so let's create a new
2226 * requested device entry for this new valid request.
2227 */
2228 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
2229 if (!rdev) {
2230 ret = -ENOMEM;
2231 goto out;
2232 }
2233 rdev->id_table = id_table;
2234
2235 /*
2236 * Append the new requested device table descriptor to the head of the
2237 * related protocol list, creating such a head first if it is not
2238 * already there.
2239 */
2240 if (!phead) {
2241 phead = kzalloc(sizeof(*phead), GFP_KERNEL);
2242 if (!phead) {
2243 kfree(rdev);
2244 ret = -ENOMEM;
2245 goto out;
2246 }
2247 INIT_LIST_HEAD(phead);
2248
2249 ret = idr_alloc(&scmi_requested_devices, (void *)phead,
2250 id_table->protocol_id,
2251 id_table->protocol_id + 1, GFP_KERNEL);
2252 if (ret != id_table->protocol_id) {
2253 pr_err("Failed to save SCMI device - ret:%d\n", ret);
2254 kfree(rdev);
2255 kfree(phead);
2256 ret = -EINVAL;
2257 goto out;
2258 }
2259 ret = 0;
2260 }
2261 list_add(&rdev->node, phead);
2262
2263 /*
2264 * Now effectively create and initialize the requested device for every
2265 * already initialized SCMI instance which has registered the requested
2266 * protocol as a valid active one: i.e. defined in DT and supported by
2267 * current platform FW.
2268 */
2269 mutex_lock(&scmi_list_mutex);
2270 list_for_each_entry(info, &scmi_list, node) {
2271 struct device_node *child;
2272
2273 child = idr_find(&info->active_protocols,
2274 id_table->protocol_id);
2275 if (child) {
2276 struct scmi_device *sdev;
2277
2278 sdev = scmi_get_protocol_device(child, info,
2279 id_table->protocol_id,
2280 id_table->name);
2281 if (sdev) {
2282 /* Set handle if not already set: device existed */
2283 if (!sdev->handle)
2284 sdev->handle =
2285 scmi_handle_get_from_info_unlocked(info);
2286 /* Relink consumer and suppliers */
2287 if (sdev->handle)
2288 scmi_device_link_add(&sdev->dev,
2289 sdev->handle->dev);
2290 }
2291 } else {
2292 dev_err(info->dev,
2293 "Failed. SCMI protocol %d not active.\n",
2294 id_table->protocol_id);
2295 }
2296 }
2297 mutex_unlock(&scmi_list_mutex);
2298
2299 out:
2300 mutex_unlock(&scmi_requested_devices_mtx);
2301
2302 return ret;
2303 }
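/*
 * Example (illustrative sketch only, not part of this driver): the kind
 * of protocol/name pair a client driver advertises; the SCMI bus layer
 * typically submits each entry of a driver's id_table to
 * scmi_protocol_device_request() at driver registration time. The name
 * and protocol below mirror a plausible clock driver and are assumptions
 * for the sake of the example. Compiled out on purpose.
 */
#if 0
static const struct scmi_device_id example_id_table[] = {
	{ SCMI_PROTOCOL_CLOCK, "clocks" },
	{ },
};

static int example_request_devices(void)
{
	/* Creates matching devices on every already probed SCMI instance */
	return scmi_protocol_device_request(&example_id_table[0]);
}
#endif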
2304
2305 /**
2306 * scmi_protocol_device_unrequest - Helper to unrequest a device
2307 *
2308 * @id_table: A protocol/name pair descriptor for the device to be unrequested.
2309 *
2310 * A helper to let an SCMI driver release its device requests; note that
2311 * devices are created and initialized once the first SCMI driver requests
2312 * them, but they are destroyed only on SCMI core unloading/unbinding.
2313 *
2314 * The current SCMI transport layer uses such devices as internal references
2315 * and, as such, they could be shared on the same transport between multiple
2316 * drivers, so they cannot be safely destroyed till the whole SCMI stack is
2317 * removed (unless the further burden of refcounting is added).
2318 */
2319 void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
2320 {
2321 struct list_head *phead;
2322
2323 pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
2324 id_table->name, id_table->protocol_id);
2325
2326 mutex_lock(&scmi_requested_devices_mtx);
2327 phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
2328 if (phead) {
2329 struct scmi_requested_dev *victim, *tmp;
2330
2331 list_for_each_entry_safe(victim, tmp, phead, node) {
2332 if (!strcmp(victim->id_table->name, id_table->name)) {
2333 list_del(&victim->node);
2334 kfree(victim);
2335 break;
2336 }
2337 }
2338
2339 if (list_empty(phead)) {
2340 idr_remove(&scmi_requested_devices,
2341 id_table->protocol_id);
2342 kfree(phead);
2343 }
2344 }
2345 mutex_unlock(&scmi_requested_devices_mtx);
2346 }
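/*
 * Example (illustrative sketch only, not part of this driver): releasing
 * a previously submitted request, typically done when the requesting
 * SCMI driver is unregistered; note that the backing devices themselves
 * survive until the whole SCMI core goes away. Compiled out on purpose.
 */
#if 0
static void example_unrequest_devices(const struct scmi_device_id *id_table)
{
	/* Drops the pending request; existing devices are left in place */
	scmi_protocol_device_unrequest(id_table);
}
#endif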
2347
2348 static int scmi_cleanup_txrx_channels(struct scmi_info *info)
2349 {
2350 int ret;
2351 struct idr *idr = &info->tx_idr;
2352
2353 ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
2354 idr_destroy(&info->tx_idr);
2355
2356 idr = &info->rx_idr;
2357 ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
2358 idr_destroy(&info->rx_idr);
2359
2360 return ret;
2361 }
2362
2363 static int scmi_probe(struct platform_device *pdev)
2364 {
2365 int ret;
2366 struct scmi_handle *handle;
2367 const struct scmi_desc *desc;
2368 struct scmi_info *info;
2369 struct device *dev = &pdev->dev;
2370 struct device_node *child, *np = dev->of_node;
2371
2372 desc = of_device_get_match_data(dev);
2373 if (!desc)
2374 return -EINVAL;
2375
2376 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
2377 if (!info)
2378 return -ENOMEM;
2379
2380 info->dev = dev;
2381 info->desc = desc;
2382 INIT_LIST_HEAD(&info->node);
2383 idr_init(&info->protocols);
2384 mutex_init(&info->protocols_mtx);
2385 idr_init(&info->active_protocols);
2386
2387 platform_set_drvdata(pdev, info);
2388 idr_init(&info->tx_idr);
2389 idr_init(&info->rx_idr);
2390
2391 handle = &info->handle;
2392 handle->dev = info->dev;
2393 handle->version = &info->version;
2394 handle->devm_protocol_acquire = scmi_devm_protocol_acquire;
2395 handle->devm_protocol_get = scmi_devm_protocol_get;
2396 handle->devm_protocol_put = scmi_devm_protocol_put;
2397
2398 /* System wide atomic threshold for atomic ops, if any */
2399 if (!of_property_read_u32(np, "atomic-threshold-us",
2400 &info->atomic_threshold))
2401 dev_info(dev,
2402 "SCMI System wide atomic threshold set to %d us\n",
2403 info->atomic_threshold);
2404 handle->is_transport_atomic = scmi_is_transport_atomic;
2405
2406 if (desc->ops->link_supplier) {
2407 ret = desc->ops->link_supplier(dev);
2408 if (ret)
2409 return ret;
2410 }
2411
2412 ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
2413 if (ret)
2414 return ret;
2415
2416 ret = scmi_xfer_info_init(info);
2417 if (ret)
2418 goto clear_txrx_setup;
2419
2420 if (scmi_notification_init(handle))
2421 dev_err(dev, "SCMI Notifications NOT available.\n");
2422
2423 if (info->desc->atomic_enabled && !is_transport_polling_capable(info))
2424 dev_err(dev,
2425 "Transport is not polling capable. Atomic mode not supported.\n");
2426
2427 /*
2428 * Trigger SCMI Base protocol initialization.
2429 * It's mandatory and won't ever be released/de-initialized until the
2430 * SCMI stack is shutdown/unloaded as a whole.
2431 */
2432 ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
2433 if (ret) {
2434 dev_err(dev, "unable to communicate with SCMI\n");
2435 goto notification_exit;
2436 }
2437
2438 mutex_lock(&scmi_list_mutex);
2439 list_add_tail(&info->node, &scmi_list);
2440 mutex_unlock(&scmi_list_mutex);
2441
2442 for_each_available_child_of_node(np, child) {
2443 u32 prot_id;
2444
2445 if (of_property_read_u32(child, "reg", &prot_id))
2446 continue;
2447
2448 if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
2449 dev_err(dev, "Out of range protocol %d\n", prot_id);
2450
2451 if (!scmi_is_protocol_implemented(handle, prot_id)) {
2452 dev_err(dev, "SCMI protocol %d not implemented\n",
2453 prot_id);
2454 continue;
2455 }
2456
2457 /*
2458 * Save this valid DT protocol descriptor amongst
2459 * @active_protocols for this SCMI instance.
2460 */
2461 ret = idr_alloc(&info->active_protocols, child,
2462 prot_id, prot_id + 1, GFP_KERNEL);
2463 if (ret != prot_id) {
2464 dev_err(dev, "SCMI protocol %d already activated. Skip\n",
2465 prot_id);
2466 continue;
2467 }
2468
2469 of_node_get(child);
2470 scmi_create_protocol_devices(child, info, prot_id);
2471 }
2472
2473 return 0;
2474
2475 notification_exit:
2476 scmi_notification_exit(&info->handle);
2477 clear_txrx_setup:
2478 scmi_cleanup_txrx_channels(info);
2479 return ret;
2480 }
2481
2482 void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
2483 {
2484 idr_remove(idr, id);
2485 }
2486
2487 static int scmi_remove(struct platform_device *pdev)
2488 {
2489 int ret, id;
2490 struct scmi_info *info = platform_get_drvdata(pdev);
2491 struct device_node *child;
2492
2493 mutex_lock(&scmi_list_mutex);
2494 if (info->users)
2495 dev_warn(&pdev->dev,
2496 "Still active SCMI users will be forcibly unbound.\n");
2497 list_del(&info->node);
2498 mutex_unlock(&scmi_list_mutex);
2499
2500 scmi_notification_exit(&info->handle);
2501
2502 mutex_lock(&info->protocols_mtx);
2503 idr_destroy(&info->protocols);
2504 mutex_unlock(&info->protocols_mtx);
2505
2506 idr_for_each_entry(&info->active_protocols, child, id)
2507 of_node_put(child);
2508 idr_destroy(&info->active_protocols);
2509
2510 /* Safe to free channels since no more users */
2511 ret = scmi_cleanup_txrx_channels(info);
2512 if (ret)
2513 dev_warn(&pdev->dev, "Failed to cleanup SCMI channels.\n");
2514
2515 return 0;
2516 }
2517
2518 static ssize_t protocol_version_show(struct device *dev,
2519 struct device_attribute *attr, char *buf)
2520 {
2521 struct scmi_info *info = dev_get_drvdata(dev);
2522
2523 return sprintf(buf, "%u.%u\n", info->version.major_ver,
2524 info->version.minor_ver);
2525 }
2526 static DEVICE_ATTR_RO(protocol_version);
2527
2528 static ssize_t firmware_version_show(struct device *dev,
2529 struct device_attribute *attr, char *buf)
2530 {
2531 struct scmi_info *info = dev_get_drvdata(dev);
2532
2533 return sprintf(buf, "0x%x\n", info->version.impl_ver);
2534 }
2535 static DEVICE_ATTR_RO(firmware_version);
2536
2537 static ssize_t vendor_id_show(struct device *dev,
2538 struct device_attribute *attr, char *buf)
2539 {
2540 struct scmi_info *info = dev_get_drvdata(dev);
2541
2542 return sprintf(buf, "%s\n", info->version.vendor_id);
2543 }
2544 static DEVICE_ATTR_RO(vendor_id);
2545
2546 static ssize_t sub_vendor_id_show(struct device *dev,
2547 struct device_attribute *attr, char *buf)
2548 {
2549 struct scmi_info *info = dev_get_drvdata(dev);
2550
2551 return sprintf(buf, "%s\n", info->version.sub_vendor_id);
2552 }
2553 static DEVICE_ATTR_RO(sub_vendor_id);
2554
2555 static struct attribute *versions_attrs[] = {
2556 &dev_attr_firmware_version.attr,
2557 &dev_attr_protocol_version.attr,
2558 &dev_attr_vendor_id.attr,
2559 &dev_attr_sub_vendor_id.attr,
2560 NULL,
2561 };
2562 ATTRIBUTE_GROUPS(versions);
2563
2564 /* Each compatible listed below must have a descriptor associated with it */
2565 static const struct of_device_id scmi_of_match[] = {
2566 #ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
2567 { .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
2568 #endif
2569 #ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
2570 { .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc },
2571 #endif
2572 #ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
2573 { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
2574 #endif
2575 #ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
2576 { .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc},
2577 #endif
2578 { /* Sentinel */ },
2579 };
2580
2581 MODULE_DEVICE_TABLE(of, scmi_of_match);
2582
2583 static struct platform_driver scmi_driver = {
2584 .driver = {
2585 .name = "arm-scmi",
2586 .suppress_bind_attrs = true,
2587 .of_match_table = scmi_of_match,
2588 .dev_groups = versions_groups,
2589 },
2590 .probe = scmi_probe,
2591 .remove = scmi_remove,
2592 };
2593
2594 /**
2595 * __scmi_transports_setup - Common helper to call transport-specific
2596 * .init/.exit code if provided.
2597 *
2598 * @init: A flag to distinguish between init and exit.
2599 *
2600 * Note that, if provided, we invoke .init/.exit functions for all the
2601 * transports currently compiled in.
2602 *
2603 * Return: 0 on Success.
2604 */
2605 static inline int __scmi_transports_setup(bool init)
2606 {
2607 int ret = 0;
2608 const struct of_device_id *trans;
2609
2610 for (trans = scmi_of_match; trans->data; trans++) {
2611 const struct scmi_desc *tdesc = trans->data;
2612
2613 if ((init && !tdesc->transport_init) ||
2614 (!init && !tdesc->transport_exit))
2615 continue;
2616
2617 if (init)
2618 ret = tdesc->transport_init();
2619 else
2620 tdesc->transport_exit();
2621
2622 if (ret) {
2623 pr_err("SCMI transport %s FAILED initialization!\n",
2624 trans->compatible);
2625 break;
2626 }
2627 }
2628
2629 return ret;
2630 }
2631
2632 static int __init scmi_transports_init(void)
2633 {
2634 return __scmi_transports_setup(true);
2635 }
2636
2637 static void __exit scmi_transports_exit(void)
2638 {
2639 __scmi_transports_setup(false);
2640 }
2641
2642 static int __init scmi_driver_init(void)
2643 {
2644 int ret;
2645
2646 /* Bail out if no SCMI transport was configured */
2647 if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
2648 return -EINVAL;
2649
2650 scmi_bus_init();
2651
2652 /* Initialize any compiled-in transport which provided an init/exit */
2653 ret = scmi_transports_init();
2654 if (ret)
2655 return ret;
2656
2657 scmi_base_register();
2658
2659 scmi_clock_register();
2660 scmi_perf_register();
2661 scmi_power_register();
2662 scmi_reset_register();
2663 scmi_sensors_register();
2664 scmi_voltage_register();
2665 scmi_system_register();
2666 scmi_powercap_register();
2667
2668 return platform_driver_register(&scmi_driver);
2669 }
2670 subsys_initcall(scmi_driver_init);
2671
2672 static void __exit scmi_driver_exit(void)
2673 {
2674 scmi_base_unregister();
2675
2676 scmi_clock_unregister();
2677 scmi_perf_unregister();
2678 scmi_power_unregister();
2679 scmi_reset_unregister();
2680 scmi_sensors_unregister();
2681 scmi_voltage_unregister();
2682 scmi_system_unregister();
2683 scmi_powercap_unregister();
2684
2685 scmi_bus_exit();
2686
2687 scmi_transports_exit();
2688
2689 platform_driver_unregister(&scmi_driver);
2690 }
2691 module_exit(scmi_driver_exit);
2692
2693 MODULE_ALIAS("platform:arm-scmi");
2694 MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
2695 MODULE_DESCRIPTION("ARM SCMI protocol driver");
2696 MODULE_LICENSE("GPL v2");
2697