1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
3
4 #include <linux/types.h>
5 #include <linux/completion.h>
6 #include <linux/mutex.h>
7 #include <linux/bitops.h>
8 #include <linux/kthread.h>
9 #include <linux/wait.h>
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/kref.h>
13 #include <linux/rcupdate.h>
14 #include <linux/sched/signal.h>
15
16 #include "vchiq_core.h"
17
18 #define VCHIQ_SLOT_HANDLER_STACK 8192
19
20 #define VCHIQ_MSG_PADDING 0 /* - */
21 #define VCHIQ_MSG_CONNECT 1 /* - */
22 #define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */
23 #define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */
24 #define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */
25 #define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */
26 #define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */
27 #define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */
28 #define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */
29 #define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */
30 #define VCHIQ_MSG_PAUSE 10 /* - */
31 #define VCHIQ_MSG_RESUME 11 /* - */
32 #define VCHIQ_MSG_REMOTE_USE 12 /* - */
33 #define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */
34 #define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */
35
36 #define TYPE_SHIFT 24
37
38 #define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1)
39 #define VCHIQ_PORT_FREE 0x1000
40 #define VCHIQ_PORT_IS_VALID(port) ((port) < VCHIQ_PORT_FREE)
41 #define VCHIQ_MAKE_MSG(type, srcport, dstport) \
42 (((type) << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
43 #define VCHIQ_MSG_TYPE(msgid) ((unsigned int)(msgid) >> TYPE_SHIFT)
44 #define VCHIQ_MSG_SRCPORT(msgid) \
45 (unsigned short)(((unsigned int)(msgid) >> 12) & 0xfff)
46 #define VCHIQ_MSG_DSTPORT(msgid) \
47 ((unsigned short)(msgid) & 0xfff)
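/*
 * For illustration, a DATA message from local port 3 to remote port 7
 * packs as:
 *   VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA, 3, 7)
 *     == (5 << 24) | (3 << 12) | 7 == 0x05003007
 * so VCHIQ_MSG_TYPE(), VCHIQ_MSG_SRCPORT() and VCHIQ_MSG_DSTPORT()
 * recover 5, 3 and 7 respectively.
 */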
48
49 #define MAKE_CONNECT (VCHIQ_MSG_CONNECT << TYPE_SHIFT)
50 #define MAKE_OPEN(srcport) \
51 ((VCHIQ_MSG_OPEN << TYPE_SHIFT) | ((srcport) << 12))
52 #define MAKE_OPENACK(srcport, dstport) \
53 ((VCHIQ_MSG_OPENACK << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
54 #define MAKE_CLOSE(srcport, dstport) \
55 ((VCHIQ_MSG_CLOSE << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
56 #define MAKE_DATA(srcport, dstport) \
57 ((VCHIQ_MSG_DATA << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
58 #define MAKE_PAUSE (VCHIQ_MSG_PAUSE << TYPE_SHIFT)
59 #define MAKE_RESUME (VCHIQ_MSG_RESUME << TYPE_SHIFT)
60 #define MAKE_REMOTE_USE (VCHIQ_MSG_REMOTE_USE << TYPE_SHIFT)
61 #define MAKE_REMOTE_USE_ACTIVE (VCHIQ_MSG_REMOTE_USE_ACTIVE << TYPE_SHIFT)
62
63 /* Ensure the fields are wide enough */
64 static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
65 == 0);
66 static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
67 static_assert((unsigned int)VCHIQ_PORT_MAX <
68 (unsigned int)VCHIQ_PORT_FREE);
69
70 #define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
71 #define VCHIQ_MSGID_CLAIMED 0x40000000
72
73 #define VCHIQ_FOURCC_INVALID 0x00000000
74 #define VCHIQ_FOURCC_IS_LEGAL(fourcc) ((fourcc) != VCHIQ_FOURCC_INVALID)
75
76 #define VCHIQ_BULK_ACTUAL_ABORTED -1
77
78 #if VCHIQ_ENABLE_STATS
79 #define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
80 #define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
81 #define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
82 (service->stats. stat += addend)
83 #else
84 #define VCHIQ_STATS_INC(state, stat) ((void)0)
85 #define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
86 #define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
87 #endif
88
89 #define HANDLE_STATE_SHIFT 12
90
91 #define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
92 #define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
93 #define SLOT_INDEX_FROM_DATA(state, data) \
94 (((unsigned int)((char *)data - (char *)state->slot_data)) / \
95 VCHIQ_SLOT_SIZE)
96 #define SLOT_INDEX_FROM_INFO(state, info) \
97 ((unsigned int)(info - state->slot_info))
98 #define SLOT_QUEUE_INDEX_FROM_POS(pos) \
99 ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
100 #define SLOT_QUEUE_INDEX_FROM_POS_MASKED(pos) \
101 (SLOT_QUEUE_INDEX_FROM_POS(pos) & VCHIQ_SLOT_QUEUE_MASK)
102
103 #define BULK_INDEX(x) ((x) & (VCHIQ_NUM_SERVICE_BULKS - 1))
104
105 #define SRVTRACE_LEVEL(srv) \
106 (((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level)
107 #define SRVTRACE_ENABLED(srv, lev) \
108 (((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev)))
109
110 #define NO_CLOSE_RECVD 0
111 #define CLOSE_RECVD 1
112
113 #define NO_RETRY_POLL 0
114 #define RETRY_POLL 1
115
116 struct vchiq_open_payload {
117 int fourcc;
118 int client_id;
119 short version;
120 short version_min;
121 };
122
123 struct vchiq_openack_payload {
124 short version;
125 };
126
127 enum {
128 QMFLAGS_IS_BLOCKING = BIT(0),
129 QMFLAGS_NO_MUTEX_LOCK = BIT(1),
130 QMFLAGS_NO_MUTEX_UNLOCK = BIT(2)
131 };
132
133 enum {
134 VCHIQ_POLL_TERMINATE,
135 VCHIQ_POLL_REMOVE,
136 VCHIQ_POLL_TXNOTIFY,
137 VCHIQ_POLL_RXNOTIFY,
138 VCHIQ_POLL_COUNT
139 };
140
141 /* we require this for consistency between endpoints */
142 static_assert(sizeof(struct vchiq_header) == 8);
143 static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
144
145 static inline void check_sizes(void)
146 {
147 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_SLOT_SIZE);
148 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS);
149 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS_PER_SIDE);
150 BUILD_BUG_ON_NOT_POWER_OF_2(sizeof(struct vchiq_header));
151 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_CURRENT_BULKS);
152 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_SERVICE_BULKS);
153 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SERVICES);
154 }
155
156 /* Run time control of log level, based on KERN_XXX level. */
157 int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
158 int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
159 int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
160
161 DEFINE_SPINLOCK(bulk_waiter_spinlock);
162 static DEFINE_SPINLOCK(quota_spinlock);
163
164 struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES];
165 static unsigned int handle_seq;
166
167 static const char *const srvstate_names[] = {
168 "FREE",
169 "HIDDEN",
170 "LISTENING",
171 "OPENING",
172 "OPEN",
173 "OPENSYNC",
174 "CLOSESENT",
175 "CLOSERECVD",
176 "CLOSEWAIT",
177 "CLOSED"
178 };
179
180 static const char *const reason_names[] = {
181 "SERVICE_OPENED",
182 "SERVICE_CLOSED",
183 "MESSAGE_AVAILABLE",
184 "BULK_TRANSMIT_DONE",
185 "BULK_RECEIVE_DONE",
186 "BULK_TRANSMIT_ABORTED",
187 "BULK_RECEIVE_ABORTED"
188 };
189
190 static const char *const conn_state_names[] = {
191 "DISCONNECTED",
192 "CONNECTING",
193 "CONNECTED",
194 "PAUSING",
195 "PAUSE_SENT",
196 "PAUSED",
197 "RESUMING",
198 "PAUSE_TIMEOUT",
199 "RESUME_TIMEOUT"
200 };
201
202 static void
203 release_message_sync(struct vchiq_state *state, struct vchiq_header *header);
204
205 static const char *msg_type_str(unsigned int msg_type)
206 {
207 switch (msg_type) {
208 case VCHIQ_MSG_PADDING: return "PADDING";
209 case VCHIQ_MSG_CONNECT: return "CONNECT";
210 case VCHIQ_MSG_OPEN: return "OPEN";
211 case VCHIQ_MSG_OPENACK: return "OPENACK";
212 case VCHIQ_MSG_CLOSE: return "CLOSE";
213 case VCHIQ_MSG_DATA: return "DATA";
214 case VCHIQ_MSG_BULK_RX: return "BULK_RX";
215 case VCHIQ_MSG_BULK_TX: return "BULK_TX";
216 case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
217 case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
218 case VCHIQ_MSG_PAUSE: return "PAUSE";
219 case VCHIQ_MSG_RESUME: return "RESUME";
220 case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
221 case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
222 case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
223 }
224 return "???";
225 }
226
227 static inline void
228 set_service_state(struct vchiq_service *service, int newstate)
229 {
230 vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
231 service->state->id, service->localport,
232 srvstate_names[service->srvstate],
233 srvstate_names[newstate]);
234 service->srvstate = newstate;
235 }
236
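/*
 * Look up a service by its handle under RCU and take a reference with
 * kref_get_unless_zero(), so a service that is concurrently being freed
 * is never returned. The caller must balance with vchiq_service_put().
 */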
237 struct vchiq_service *
238 find_service_by_handle(unsigned int handle)
239 {
240 struct vchiq_service *service;
241
242 rcu_read_lock();
243 service = handle_to_service(handle);
244 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
245 service->handle == handle &&
246 kref_get_unless_zero(&service->ref_count)) {
247 service = rcu_pointer_handoff(service);
248 rcu_read_unlock();
249 return service;
250 }
251 rcu_read_unlock();
252 vchiq_log_info(vchiq_core_log_level,
253 "Invalid service handle 0x%x", handle);
254 return NULL;
255 }
256
257 struct vchiq_service *
258 find_service_by_port(struct vchiq_state *state, unsigned int localport)
259 {
260 if (localport <= VCHIQ_PORT_MAX) {
261 struct vchiq_service *service;
262
263 rcu_read_lock();
264 service = rcu_dereference(state->services[localport]);
265 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
266 kref_get_unless_zero(&service->ref_count)) {
267 service = rcu_pointer_handoff(service);
268 rcu_read_unlock();
269 return service;
270 }
271 rcu_read_unlock();
272 }
273 vchiq_log_info(vchiq_core_log_level,
274 "Invalid port %u", localport);
275 return NULL;
276 }
277
278 struct vchiq_service *
279 find_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
280 {
281 struct vchiq_service *service;
282
283 rcu_read_lock();
284 service = handle_to_service(handle);
285 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
286 service->handle == handle &&
287 service->instance == instance &&
288 kref_get_unless_zero(&service->ref_count)) {
289 service = rcu_pointer_handoff(service);
290 rcu_read_unlock();
291 return service;
292 }
293 rcu_read_unlock();
294 vchiq_log_info(vchiq_core_log_level,
295 "Invalid service handle 0x%x", handle);
296 return NULL;
297 }
298
299 struct vchiq_service *
300 find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
301 {
302 struct vchiq_service *service;
303
304 rcu_read_lock();
305 service = handle_to_service(handle);
306 if (service &&
307 (service->srvstate == VCHIQ_SRVSTATE_FREE ||
308 service->srvstate == VCHIQ_SRVSTATE_CLOSED) &&
309 service->handle == handle &&
310 service->instance == instance &&
311 kref_get_unless_zero(&service->ref_count)) {
312 service = rcu_pointer_handoff(service);
313 rcu_read_unlock();
314 return service;
315 }
316 rcu_read_unlock();
317 vchiq_log_info(vchiq_core_log_level,
318 "Invalid service handle 0x%x", handle);
319 return service;
320 }
321
322 struct vchiq_service *
323 __next_service_by_instance(struct vchiq_state *state,
324 struct vchiq_instance *instance,
325 int *pidx)
326 {
327 struct vchiq_service *service = NULL;
328 int idx = *pidx;
329
330 while (idx < state->unused_service) {
331 struct vchiq_service *srv;
332
333 srv = rcu_dereference(state->services[idx]);
334 idx++;
335 if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE &&
336 srv->instance == instance) {
337 service = srv;
338 break;
339 }
340 }
341
342 *pidx = idx;
343 return service;
344 }
345
346 struct vchiq_service *
347 next_service_by_instance(struct vchiq_state *state,
348 struct vchiq_instance *instance,
349 int *pidx)
350 {
351 struct vchiq_service *service;
352
353 rcu_read_lock();
354 while (1) {
355 service = __next_service_by_instance(state, instance, pidx);
356 if (!service)
357 break;
358 if (kref_get_unless_zero(&service->ref_count)) {
359 service = rcu_pointer_handoff(service);
360 break;
361 }
362 }
363 rcu_read_unlock();
364 return service;
365 }
366
367 void
368 vchiq_service_get(struct vchiq_service *service)
369 {
370 if (!service) {
371 WARN(1, "%s service is NULL\n", __func__);
372 return;
373 }
374 kref_get(&service->ref_count);
375 }
376
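/*
 * kref release callback: called when the last reference to a service is
 * dropped. The services[] entry is cleared and the memory is freed with
 * kfree_rcu() so concurrent RCU readers remain safe.
 */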
377 static void service_release(struct kref *kref)
378 {
379 struct vchiq_service *service =
380 container_of(kref, struct vchiq_service, ref_count);
381 struct vchiq_state *state = service->state;
382
383 WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
384 rcu_assign_pointer(state->services[service->localport], NULL);
385 if (service->userdata_term)
386 service->userdata_term(service->base.userdata);
387 kfree_rcu(service, rcu);
388 }
389
390 void
391 vchiq_service_put(struct vchiq_service *service)
392 {
393 if (!service) {
394 WARN(1, "%s: service is NULL\n", __func__);
395 return;
396 }
397 kref_put(&service->ref_count, service_release);
398 }
399
400 int
401 vchiq_get_client_id(unsigned int handle)
402 {
403 struct vchiq_service *service;
404 int id;
405
406 rcu_read_lock();
407 service = handle_to_service(handle);
408 id = service ? service->client_id : 0;
409 rcu_read_unlock();
410 return id;
411 }
412
413 void *
414 vchiq_get_service_userdata(unsigned int handle)
415 {
416 void *userdata;
417 struct vchiq_service *service;
418
419 rcu_read_lock();
420 service = handle_to_service(handle);
421 userdata = service ? service->base.userdata : NULL;
422 rcu_read_unlock();
423 return userdata;
424 }
425 EXPORT_SYMBOL(vchiq_get_service_userdata);
426
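/*
 * Flag a service as closing and unblock anything that might be waiting on
 * it. Briefly taking and releasing the recycle and slot mutexes acts as a
 * barrier against threads currently holding them; the quota event is
 * completed to wake any sender stalled on this service's quota.
 */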
427 static void
428 mark_service_closing_internal(struct vchiq_service *service, int sh_thread)
429 {
430 struct vchiq_state *state = service->state;
431 struct vchiq_service_quota *quota;
432
433 service->closing = 1;
434
435 /* Synchronise with other threads. */
436 mutex_lock(&state->recycle_mutex);
437 mutex_unlock(&state->recycle_mutex);
438 if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
439 /*
440 * If we're pausing then the slot_mutex is held until resume
441 * by the slot handler. Therefore don't try to acquire this
442 * mutex if we're the slot handler and in the pause sent state.
443 * We don't need to in this case anyway.
444 */
445 mutex_lock(&state->slot_mutex);
446 mutex_unlock(&state->slot_mutex);
447 }
448
449 /* Unblock any sending thread. */
450 quota = &state->service_quotas[service->localport];
451 complete(&quota->quota_event);
452 }
453
454 static void
455 mark_service_closing(struct vchiq_service *service)
456 {
457 mark_service_closing_internal(service, 0);
458 }
459
460 static inline enum vchiq_status
461 make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
462 struct vchiq_header *header, void *bulk_userdata)
463 {
464 enum vchiq_status status;
465
466 vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
467 service->state->id, service->localport, reason_names[reason],
468 header, bulk_userdata);
469 status = service->base.callback(reason, header, service->handle, bulk_userdata);
470 if (status == VCHIQ_ERROR) {
471 vchiq_log_warning(vchiq_core_log_level,
472 "%d: ignoring ERROR from callback to service %x",
473 service->state->id, service->handle);
474 status = VCHIQ_SUCCESS;
475 }
476
477 if (reason != VCHIQ_MESSAGE_AVAILABLE)
478 vchiq_release_message(service->handle, header);
479
480 return status;
481 }
482
483 inline void
484 vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
485 {
486 enum vchiq_connstate oldstate = state->conn_state;
487
488 vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id, conn_state_names[oldstate],
489 conn_state_names[newstate]);
490 state->conn_state = newstate;
491 vchiq_platform_conn_state_changed(state, oldstate, newstate);
492 }
493
494 static inline void
495 remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
496 {
497 event->armed = 0;
498 /*
499 * Don't clear the 'fired' flag because it may already have been set
500 * by the other side.
501 */
502 init_waitqueue_head(wq);
503 }
504
505 /*
506 * All the event waiting routines in VCHIQ used a custom semaphore
507 * implementation that filtered most signals. This achieved a behaviour similar
508 * to the "killable" family of functions. While cleaning up this code all the
509  * routines were switched to the "interruptible" family of functions, as the
510  * former was deemed unjustified and the use of "killable" would set all VCHIQ's
511 * threads in D state.
512 */
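/*
 * Wait for the peer to fire this event. 'armed' tells the remote side that
 * a waiter is present; 'fired' is set by the remote (or by
 * remote_event_signal_local()). Returns 1 when the event fired, or 0 if the
 * wait was interrupted by a signal.
 */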
513 static inline int
514 remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
515 {
516 if (!event->fired) {
517 event->armed = 1;
518 dsb(sy);
519 if (wait_event_interruptible(*wq, event->fired)) {
520 event->armed = 0;
521 return 0;
522 }
523 event->armed = 0;
524 wmb();
525 }
526
527 event->fired = 0;
528 return 1;
529 }
530
531 static inline void
532 remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
533 {
534 event->fired = 1;
535 event->armed = 0;
536 wake_up_all(wq);
537 }
538
539 static inline void
540 remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
541 {
542 if (event->fired && event->armed)
543 remote_event_signal_local(wq, event);
544 }
545
546 void
547 remote_event_pollall(struct vchiq_state *state)
548 {
549 remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger);
550 remote_event_poll(&state->sync_release_event, &state->local->sync_release);
551 remote_event_poll(&state->trigger_event, &state->local->trigger);
552 remote_event_poll(&state->recycle_event, &state->local->recycle);
553 }
554
555 /*
556 * Round up message sizes so that any space at the end of a slot is always big
557 * enough for a header. This relies on header size being a power of two, which
558 * has been verified earlier by a static assertion.
559 */
560
561 static inline size_t
562 calc_stride(size_t size)
563 {
564 /* Allow room for the header */
565 size += sizeof(struct vchiq_header);
566
567 /* Round up */
568 return (size + sizeof(struct vchiq_header) - 1) &
569 ~(sizeof(struct vchiq_header) - 1);
570 }
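/*
 * Worked example: with an 8-byte header (asserted above), a 10-byte
 * payload gives calc_stride(10) == (10 + 8 + 7) & ~7 == 24, i.e. the
 * message occupies 24 bytes of the slot including its header.
 */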
571
572 /* Called by the slot handler thread */
573 static struct vchiq_service *
574 get_listening_service(struct vchiq_state *state, int fourcc)
575 {
576 int i;
577
578 WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
579
580 rcu_read_lock();
581 for (i = 0; i < state->unused_service; i++) {
582 struct vchiq_service *service;
583
584 service = rcu_dereference(state->services[i]);
585 if (service &&
586 service->public_fourcc == fourcc &&
587 (service->srvstate == VCHIQ_SRVSTATE_LISTENING ||
588 (service->srvstate == VCHIQ_SRVSTATE_OPEN &&
589 service->remoteport == VCHIQ_PORT_FREE)) &&
590 kref_get_unless_zero(&service->ref_count)) {
591 service = rcu_pointer_handoff(service);
592 rcu_read_unlock();
593 return service;
594 }
595 }
596 rcu_read_unlock();
597 return NULL;
598 }
599
600 /* Called by the slot handler thread */
601 static struct vchiq_service *
602 get_connected_service(struct vchiq_state *state, unsigned int port)
603 {
604 int i;
605
606 rcu_read_lock();
607 for (i = 0; i < state->unused_service; i++) {
608 struct vchiq_service *service =
609 rcu_dereference(state->services[i]);
610
611 if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN &&
612 service->remoteport == port &&
613 kref_get_unless_zero(&service->ref_count)) {
614 service = rcu_pointer_handoff(service);
615 rcu_read_unlock();
616 return service;
617 }
618 }
619 rcu_read_unlock();
620 return NULL;
621 }
622
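/*
 * Record that 'service' (if any) needs attention of type 'poll_type' and
 * kick the slot handler. The per-service poll_flags bit and the per-group
 * poll_services bit are both set with cmpxchg loops so concurrent
 * requesters cannot lose each other's bits.
 */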
623 inline void
624 request_poll(struct vchiq_state *state, struct vchiq_service *service,
625 int poll_type)
626 {
627 u32 value;
628 int index;
629
630 if (!service)
631 goto skip_service;
632
633 do {
634 value = atomic_read(&service->poll_flags);
635 } while (atomic_cmpxchg(&service->poll_flags, value,
636 value | BIT(poll_type)) != value);
637
638 index = BITSET_WORD(service->localport);
639 do {
640 value = atomic_read(&state->poll_services[index]);
641 } while (atomic_cmpxchg(&state->poll_services[index],
642 value, value | BIT(service->localport & 0x1f)) != value);
643
644 skip_service:
645 state->poll_needed = 1;
646 wmb();
647
648 /* ... and ensure the slot handler runs. */
649 remote_event_signal_local(&state->trigger_event, &state->local->trigger);
650 }
651
652 /*
653 * Called from queue_message, by the slot handler and application threads,
654 * with slot_mutex held
655 */
656 static struct vchiq_header *
657 reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
658 {
659 struct vchiq_shared_state *local = state->local;
660 int tx_pos = state->local_tx_pos;
661 int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
662
663 if (space > slot_space) {
664 struct vchiq_header *header;
665 /* Fill the remaining space with padding */
666 WARN_ON(!state->tx_data);
667 header = (struct vchiq_header *)
668 (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
669 header->msgid = VCHIQ_MSGID_PADDING;
670 header->size = slot_space - sizeof(struct vchiq_header);
671
672 tx_pos += slot_space;
673 }
674
675 /* If necessary, get the next slot. */
676 if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
677 int slot_index;
678
679 /* If there is no free slot... */
680
681 if (!try_wait_for_completion(&state->slot_available_event)) {
682 /* ...wait for one. */
683
684 VCHIQ_STATS_INC(state, slot_stalls);
685
686 /* But first, flush through the last slot. */
687 state->local_tx_pos = tx_pos;
688 local->tx_pos = tx_pos;
689 remote_event_signal(&state->remote->trigger);
690
691 if (!is_blocking ||
692 (wait_for_completion_interruptible(&state->slot_available_event)))
693 return NULL; /* No space available */
694 }
695
696 if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
697 complete(&state->slot_available_event);
698 pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos);
699 return NULL;
700 }
701
702 slot_index = local->slot_queue[SLOT_QUEUE_INDEX_FROM_POS_MASKED(tx_pos)];
703 state->tx_data =
704 (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
705 }
706
707 state->local_tx_pos = tx_pos + space;
708
709 return (struct vchiq_header *)(state->tx_data +
710 (tx_pos & VCHIQ_SLOT_MASK));
711 }
712
713 static void
714 process_free_data_message(struct vchiq_state *state, u32 *service_found,
715 struct vchiq_header *header)
716 {
717 int msgid = header->msgid;
718 int port = VCHIQ_MSG_SRCPORT(msgid);
719 struct vchiq_service_quota *quota = &state->service_quotas[port];
720 int count;
721
722 spin_lock(&quota_spinlock);
723 count = quota->message_use_count;
724 if (count > 0)
725 quota->message_use_count = count - 1;
726 spin_unlock(&quota_spinlock);
727
728 if (count == quota->message_quota) {
729 /*
730 * Signal the service that it
731 * has dropped below its quota
732 */
733 complete(&quota->quota_event);
734 } else if (count == 0) {
735 vchiq_log_error(vchiq_core_log_level,
736 "service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
737 port, quota->message_use_count, header, msgid, header->msgid,
738 header->size);
739 WARN(1, "invalid message use count\n");
740 }
741 if (!BITSET_IS_SET(service_found, port)) {
742 /* Set the found bit for this service */
743 BITSET_SET(service_found, port);
744
745 spin_lock(&quota_spinlock);
746 count = quota->slot_use_count;
747 if (count > 0)
748 quota->slot_use_count = count - 1;
749 spin_unlock(&quota_spinlock);
750
751 if (count > 0) {
752 /*
753 * Signal the service in case
754 * it has dropped below its quota
755 */
756 complete(&quota->quota_event);
757 vchiq_log_trace(vchiq_core_log_level, "%d: pfq:%d %x@%pK - slot_use->%d",
758 state->id, port, header->size, header, count - 1);
759 } else {
760 vchiq_log_error(vchiq_core_log_level,
761 "service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
762 port, count, header, msgid, header->msgid, header->size);
763 WARN(1, "bad slot use count\n");
764 }
765 }
766 }
767
768 /* Called by the recycle thread. */
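/*
 * Walk the slots that the peer has finished with, adjusting the per-service
 * message and slot use counts for any DATA messages they contained, then
 * return each slot to the available queue and wake senders waiting in
 * reserve_space().
 */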
769 static void
770 process_free_queue(struct vchiq_state *state, u32 *service_found,
771 size_t length)
772 {
773 struct vchiq_shared_state *local = state->local;
774 int slot_queue_available;
775
776 /*
777 * Find slots which have been freed by the other side, and return them
778 * to the available queue.
779 */
780 slot_queue_available = state->slot_queue_available;
781
782 /*
783 * Use a memory barrier to ensure that any state that may have been
784 * modified by another thread is not masked by stale prefetched
785 * values.
786 */
787 mb();
788
789 while (slot_queue_available != local->slot_queue_recycle) {
790 unsigned int pos;
791 int slot_index = local->slot_queue[slot_queue_available &
792 VCHIQ_SLOT_QUEUE_MASK];
793 char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
794 int data_found = 0;
795
796 slot_queue_available++;
797 /*
798 * Beware of the address dependency - data is calculated
799 * using an index written by the other side.
800 */
801 rmb();
802
803 vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
804 state->id, slot_index, data, local->slot_queue_recycle,
805 slot_queue_available);
806
807 /* Initialise the bitmask for services which have used this slot */
808 memset(service_found, 0, length);
809
810 pos = 0;
811
812 while (pos < VCHIQ_SLOT_SIZE) {
813 struct vchiq_header *header =
814 (struct vchiq_header *)(data + pos);
815 int msgid = header->msgid;
816
817 if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
818 process_free_data_message(state, service_found,
819 header);
820 data_found = 1;
821 }
822
823 pos += calc_stride(header->size);
824 if (pos > VCHIQ_SLOT_SIZE) {
825 vchiq_log_error(vchiq_core_log_level,
826 "pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
827 pos, header, msgid, header->msgid, header->size);
828 WARN(1, "invalid slot position\n");
829 }
830 }
831
832 if (data_found) {
833 int count;
834
835 spin_lock(&quota_spinlock);
836 count = state->data_use_count;
837 if (count > 0)
838 state->data_use_count = count - 1;
839 spin_unlock(&quota_spinlock);
840 if (count == state->data_quota)
841 complete(&state->data_quota_event);
842 }
843
844 /*
845 * Don't allow the slot to be reused until we are no
846 * longer interested in it.
847 */
848 mb();
849
850 state->slot_queue_available = slot_queue_available;
851 complete(&state->slot_available_event);
852 }
853 }
854
855 static ssize_t
856 memcpy_copy_callback(void *context, void *dest, size_t offset, size_t maxsize)
857 {
858 memcpy(dest + offset, context + offset, maxsize);
859 return maxsize;
860 }
861
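/*
 * Copy 'size' bytes into the slot by repeatedly invoking copy_callback()
 * until it has produced the whole payload. A negative return from the
 * callback is propagated; a zero-length or over-long chunk is treated as
 * -EIO.
 */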
862 static ssize_t
863 copy_message_data(ssize_t (*copy_callback)(void *context, void *dest, size_t offset,
864 size_t maxsize),
865 void *context,
866 void *dest,
867 size_t size)
868 {
869 size_t pos = 0;
870
871 while (pos < size) {
872 ssize_t callback_result;
873 size_t max_bytes = size - pos;
874
875 callback_result = copy_callback(context, dest + pos, pos,
876 max_bytes);
877
878 if (callback_result < 0)
879 return callback_result;
880
881 if (!callback_result)
882 return -EIO;
883
884 if (callback_result > max_bytes)
885 return -EIO;
886
887 pos += callback_result;
888 }
889
890 return size;
891 }
892
893 /* Called by the slot handler and application threads */
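/*
 * A rough outline of what follows: take slot_mutex (unless the caller
 * already holds it, signalled by QMFLAGS_NO_MUTEX_LOCK), enforce the global
 * data quota and the per-service message/slot quotas for DATA messages,
 * reserve space in the transmit slot, copy the payload via copy_callback(),
 * then publish the new tx_pos and signal the remote trigger event.
 */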
894 static enum vchiq_status
895 queue_message(struct vchiq_state *state, struct vchiq_service *service,
896 int msgid,
897 ssize_t (*copy_callback)(void *context, void *dest,
898 size_t offset, size_t maxsize),
899 void *context, size_t size, int flags)
900 {
901 struct vchiq_shared_state *local;
902 struct vchiq_service_quota *quota = NULL;
903 struct vchiq_header *header;
904 int type = VCHIQ_MSG_TYPE(msgid);
905
906 size_t stride;
907
908 local = state->local;
909
910 stride = calc_stride(size);
911
912 WARN_ON(stride > VCHIQ_SLOT_SIZE);
913
914 if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
915 mutex_lock_killable(&state->slot_mutex))
916 return VCHIQ_RETRY;
917
918 if (type == VCHIQ_MSG_DATA) {
919 int tx_end_index;
920
921 if (!service) {
922 WARN(1, "%s: service is NULL\n", __func__);
923 mutex_unlock(&state->slot_mutex);
924 return VCHIQ_ERROR;
925 }
926
927 WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
928 QMFLAGS_NO_MUTEX_UNLOCK));
929
930 if (service->closing) {
931 /* The service has been closed */
932 mutex_unlock(&state->slot_mutex);
933 return VCHIQ_ERROR;
934 }
935
936 quota = &state->service_quotas[service->localport];
937
938 spin_lock(&quota_spinlock);
939
940 /*
941 * Ensure this service doesn't use more than its quota of
942 * messages or slots
943 */
944 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
945
946 /*
947 * Ensure data messages don't use more than their quota of
948 * slots
949 */
950 while ((tx_end_index != state->previous_data_index) &&
951 (state->data_use_count == state->data_quota)) {
952 VCHIQ_STATS_INC(state, data_stalls);
953 spin_unlock(&quota_spinlock);
954 mutex_unlock(&state->slot_mutex);
955
956 if (wait_for_completion_interruptible(&state->data_quota_event))
957 return VCHIQ_RETRY;
958
959 mutex_lock(&state->slot_mutex);
960 spin_lock(&quota_spinlock);
961 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
962 if ((tx_end_index == state->previous_data_index) ||
963 (state->data_use_count < state->data_quota)) {
964 /* Pass the signal on to other waiters */
965 complete(&state->data_quota_event);
966 break;
967 }
968 }
969
970 while ((quota->message_use_count == quota->message_quota) ||
971 ((tx_end_index != quota->previous_tx_index) &&
972 (quota->slot_use_count == quota->slot_quota))) {
973 spin_unlock(&quota_spinlock);
974 vchiq_log_trace(vchiq_core_log_level,
975 "%d: qm:%d %s,%zx - quota stall (msg %d, slot %d)",
976 state->id, service->localport, msg_type_str(type), size,
977 quota->message_use_count, quota->slot_use_count);
978 VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
979 mutex_unlock(&state->slot_mutex);
980 if (wait_for_completion_interruptible(&quota->quota_event))
981 return VCHIQ_RETRY;
982 if (service->closing)
983 return VCHIQ_ERROR;
984 if (mutex_lock_killable(&state->slot_mutex))
985 return VCHIQ_RETRY;
986 if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
987 /* The service has been closed */
988 mutex_unlock(&state->slot_mutex);
989 return VCHIQ_ERROR;
990 }
991 spin_lock(&quota_spinlock);
992 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
993 }
994
995 spin_unlock(&quota_spinlock);
996 }
997
998 header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);
999
1000 if (!header) {
1001 if (service)
1002 VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
1003 /*
1004 * In the event of a failure, return the mutex to the
1005 * state it was in
1006 */
1007 if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
1008 mutex_unlock(&state->slot_mutex);
1009 return VCHIQ_RETRY;
1010 }
1011
1012 if (type == VCHIQ_MSG_DATA) {
1013 ssize_t callback_result;
1014 int tx_end_index;
1015 int slot_use_count;
1016
1017 vchiq_log_info(vchiq_core_log_level, "%d: qm %s@%pK,%zx (%d->%d)", state->id,
1018 msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
1019 VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
1020
1021 WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
1022 QMFLAGS_NO_MUTEX_UNLOCK));
1023
1024 callback_result =
1025 copy_message_data(copy_callback, context,
1026 header->data, size);
1027
1028 if (callback_result < 0) {
1029 mutex_unlock(&state->slot_mutex);
1030 VCHIQ_SERVICE_STATS_INC(service, error_count);
1031 return VCHIQ_ERROR;
1032 }
1033
1034 if (SRVTRACE_ENABLED(service,
1035 VCHIQ_LOG_INFO))
1036 vchiq_log_dump_mem("Sent", 0,
1037 header->data,
1038 min_t(size_t, 16, callback_result));
1039
1040 spin_lock(&quota_spinlock);
1041 quota->message_use_count++;
1042
1043 tx_end_index =
1044 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
1045
1046 /*
1047 * If this transmission can't fit in the last slot used by any
1048 * service, the data_use_count must be increased.
1049 */
1050 if (tx_end_index != state->previous_data_index) {
1051 state->previous_data_index = tx_end_index;
1052 state->data_use_count++;
1053 }
1054
1055 /*
1056 * If this isn't the same slot last used by this service,
1057 * the service's slot_use_count must be increased.
1058 */
1059 if (tx_end_index != quota->previous_tx_index) {
1060 quota->previous_tx_index = tx_end_index;
1061 slot_use_count = ++quota->slot_use_count;
1062 } else {
1063 slot_use_count = 0;
1064 }
1065
1066 spin_unlock(&quota_spinlock);
1067
1068 if (slot_use_count)
1069 vchiq_log_trace(vchiq_core_log_level,
1070 "%d: qm:%d %s,%zx - slot_use->%d (hdr %p)", state->id,
1071 service->localport, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1072 size, slot_use_count, header);
1073
1074 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1075 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1076 } else {
1077 vchiq_log_info(vchiq_core_log_level, "%d: qm %s@%pK,%zx (%d->%d)", state->id,
1078 msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
1079 VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
1080 if (size != 0) {
1081 /*
1082 * It is assumed for now that this code path
1083 * only happens from calls inside this file.
1084 *
1085 * External callers are through the vchiq_queue_message
1086 * path which always sets the type to be VCHIQ_MSG_DATA.
1087 *
1088 * At first glance this appears to be correct but
1089 * more review is needed.
1090 */
1091 copy_message_data(copy_callback, context,
1092 header->data, size);
1093 }
1094 VCHIQ_STATS_INC(state, ctrl_tx_count);
1095 }
1096
1097 header->msgid = msgid;
1098 header->size = size;
1099
1100 {
1101 int svc_fourcc;
1102
1103 svc_fourcc = service
1104 ? service->base.fourcc
1105 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1106
1107 vchiq_log_info(SRVTRACE_LEVEL(service),
1108 "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
1109 msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
1110 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc), VCHIQ_MSG_SRCPORT(msgid),
1111 VCHIQ_MSG_DSTPORT(msgid), size);
1112 }
1113
1114 /* Make sure the new header is visible to the peer. */
1115 wmb();
1116
1117 /* Make the new tx_pos visible to the peer. */
1118 local->tx_pos = state->local_tx_pos;
1119 wmb();
1120
1121 if (service && (type == VCHIQ_MSG_CLOSE))
1122 set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
1123
1124 if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
1125 mutex_unlock(&state->slot_mutex);
1126
1127 remote_event_signal(&state->remote->trigger);
1128
1129 return VCHIQ_SUCCESS;
1130 }
1131
1132 /* Called by the slot handler and application threads */
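/*
 * Synchronous variant of queue_message(): the message is written into the
 * dedicated sync slot, serialised by sync_mutex, after waiting for the peer
 * to release it (sync_release), and the peer is notified via sync_trigger.
 */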
1133 static enum vchiq_status
1134 queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
1135 int msgid,
1136 ssize_t (*copy_callback)(void *context, void *dest,
1137 size_t offset, size_t maxsize),
1138 void *context, int size, int is_blocking)
1139 {
1140 struct vchiq_shared_state *local;
1141 struct vchiq_header *header;
1142 ssize_t callback_result;
1143
1144 local = state->local;
1145
1146 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME &&
1147 mutex_lock_killable(&state->sync_mutex))
1148 return VCHIQ_RETRY;
1149
1150 remote_event_wait(&state->sync_release_event, &local->sync_release);
1151
1152 rmb();
1153
1154 header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1155 local->slot_sync);
1156
1157 {
1158 int oldmsgid = header->msgid;
1159
1160 if (oldmsgid != VCHIQ_MSGID_PADDING)
1161 vchiq_log_error(vchiq_core_log_level, "%d: qms - msgid %x, not PADDING",
1162 state->id, oldmsgid);
1163 }
1164
1165 vchiq_log_info(vchiq_sync_log_level,
1166 "%d: qms %s@%pK,%x (%d->%d)", state->id,
1167 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1168 header, size, VCHIQ_MSG_SRCPORT(msgid),
1169 VCHIQ_MSG_DSTPORT(msgid));
1170
1171 callback_result =
1172 copy_message_data(copy_callback, context,
1173 header->data, size);
1174
1175 if (callback_result < 0) {
1176 mutex_unlock(&state->slot_mutex);
1177 VCHIQ_SERVICE_STATS_INC(service, error_count);
1178 return VCHIQ_ERROR;
1179 }
1180
1181 if (service) {
1182 if (SRVTRACE_ENABLED(service,
1183 VCHIQ_LOG_INFO))
1184 vchiq_log_dump_mem("Sent", 0,
1185 header->data,
1186 min_t(size_t, 16, callback_result));
1187
1188 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1189 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1190 } else {
1191 VCHIQ_STATS_INC(state, ctrl_tx_count);
1192 }
1193
1194 header->size = size;
1195 header->msgid = msgid;
1196
1197 if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
1198 int svc_fourcc;
1199
1200 svc_fourcc = service
1201 ? service->base.fourcc
1202 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1203
1204 vchiq_log_trace(vchiq_sync_log_level,
1205 "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
1206 msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
1207 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc), VCHIQ_MSG_SRCPORT(msgid),
1208 VCHIQ_MSG_DSTPORT(msgid), size);
1209 }
1210
1211 remote_event_signal(&state->remote->sync_trigger);
1212
1213 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
1214 mutex_unlock(&state->sync_mutex);
1215
1216 return VCHIQ_SUCCESS;
1217 }
1218
1219 static inline void
1220 claim_slot(struct vchiq_slot_info *slot)
1221 {
1222 slot->use_count++;
1223 }
1224
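/*
 * Drop one reference on a received slot. When release_count catches up
 * with use_count the slot index is appended to the remote recycle queue
 * and the peer's recycle event is signalled so it can reuse the slot.
 */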
1225 static void
1226 release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
1227 struct vchiq_header *header, struct vchiq_service *service)
1228 {
1229 mutex_lock(&state->recycle_mutex);
1230
1231 if (header) {
1232 int msgid = header->msgid;
1233
1234 if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) || (service && service->closing)) {
1235 mutex_unlock(&state->recycle_mutex);
1236 return;
1237 }
1238
1239 /* Rewrite the message header to prevent a double release */
1240 header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
1241 }
1242
1243 slot_info->release_count++;
1244
1245 if (slot_info->release_count == slot_info->use_count) {
1246 int slot_queue_recycle;
1247 /* Add to the freed queue */
1248
1249 /*
1250 * A read barrier is necessary here to prevent speculative
1251 * fetches of remote->slot_queue_recycle from overtaking the
1252 * mutex.
1253 */
1254 rmb();
1255
1256 slot_queue_recycle = state->remote->slot_queue_recycle;
1257 state->remote->slot_queue[slot_queue_recycle &
1258 VCHIQ_SLOT_QUEUE_MASK] =
1259 SLOT_INDEX_FROM_INFO(state, slot_info);
1260 state->remote->slot_queue_recycle = slot_queue_recycle + 1;
1261 vchiq_log_info(vchiq_core_log_level, "%d: %s %d - recycle->%x", state->id, __func__,
1262 SLOT_INDEX_FROM_INFO(state, slot_info),
1263 state->remote->slot_queue_recycle);
1264
1265 /*
1266 * A write barrier is necessary, but remote_event_signal
1267 * contains one.
1268 */
1269 remote_event_signal(&state->remote->recycle);
1270 }
1271
1272 mutex_unlock(&state->recycle_mutex);
1273 }
1274
1275 static inline enum vchiq_reason
1276 get_bulk_reason(struct vchiq_bulk *bulk)
1277 {
1278 if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1279 if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1280 return VCHIQ_BULK_TRANSMIT_ABORTED;
1281
1282 return VCHIQ_BULK_TRANSMIT_DONE;
1283 }
1284
1285 if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1286 return VCHIQ_BULK_RECEIVE_ABORTED;
1287
1288 return VCHIQ_BULK_RECEIVE_DONE;
1289 }
1290
1291 /* Called by the slot handler - don't hold the bulk mutex */
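/*
 * Deliver completions for bulks between 'remove' and the last processed
 * entry: blocking waiters are completed directly, callback-mode bulks get a
 * BULK_*_DONE/ABORTED service callback, and if a callback asks for a retry
 * the remainder is deferred via request_poll().
 */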
1292 static enum vchiq_status
1293 notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
1294 int retry_poll)
1295 {
1296 enum vchiq_status status = VCHIQ_SUCCESS;
1297
1298 vchiq_log_trace(vchiq_core_log_level, "%d: nb:%d %cx - p=%x rn=%x r=%x", service->state->id,
1299 service->localport, (queue == &service->bulk_tx) ? 't' : 'r',
1300 queue->process, queue->remote_notify, queue->remove);
1301
1302 queue->remote_notify = queue->process;
1303
1304 while (queue->remove != queue->remote_notify) {
1305 struct vchiq_bulk *bulk =
1306 &queue->bulks[BULK_INDEX(queue->remove)];
1307
1308 /*
1309 * Only generate callbacks for non-dummy bulk
1310 * requests, and non-terminated services
1311 */
1312 if (bulk->data && service->instance) {
1313 if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
1314 if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1315 VCHIQ_SERVICE_STATS_INC(service, bulk_tx_count);
1316 VCHIQ_SERVICE_STATS_ADD(service, bulk_tx_bytes,
1317 bulk->actual);
1318 } else {
1319 VCHIQ_SERVICE_STATS_INC(service, bulk_rx_count);
1320 VCHIQ_SERVICE_STATS_ADD(service, bulk_rx_bytes,
1321 bulk->actual);
1322 }
1323 } else {
1324 VCHIQ_SERVICE_STATS_INC(service, bulk_aborted_count);
1325 }
1326 if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
1327 struct bulk_waiter *waiter;
1328
1329 spin_lock(&bulk_waiter_spinlock);
1330 waiter = bulk->userdata;
1331 if (waiter) {
1332 waiter->actual = bulk->actual;
1333 complete(&waiter->event);
1334 }
1335 spin_unlock(&bulk_waiter_spinlock);
1336 } else if (bulk->mode == VCHIQ_BULK_MODE_CALLBACK) {
1337 enum vchiq_reason reason =
1338 get_bulk_reason(bulk);
1339 status = make_service_callback(service, reason, NULL,
1340 bulk->userdata);
1341 if (status == VCHIQ_RETRY)
1342 break;
1343 }
1344 }
1345
1346 queue->remove++;
1347 complete(&service->bulk_remove_event);
1348 }
1349 if (!retry_poll)
1350 status = VCHIQ_SUCCESS;
1351
1352 if (status == VCHIQ_RETRY)
1353 request_poll(service->state, service, (queue == &service->bulk_tx) ?
1354 VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
1355
1356 return status;
1357 }
1358
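/*
 * Consume the poll bits for one group of 32 services and act on them:
 * remove/terminate requests close the service, TX/RX notify requests kick
 * notify_bulks() for the corresponding bulk queue.
 */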
1359 static void
1360 poll_services_of_group(struct vchiq_state *state, int group)
1361 {
1362 u32 flags = atomic_xchg(&state->poll_services[group], 0);
1363 int i;
1364
1365 for (i = 0; flags; i++) {
1366 struct vchiq_service *service;
1367 u32 service_flags;
1368
1369 if ((flags & BIT(i)) == 0)
1370 continue;
1371
1372 service = find_service_by_port(state, (group << 5) + i);
1373 flags &= ~BIT(i);
1374
1375 if (!service)
1376 continue;
1377
1378 service_flags = atomic_xchg(&service->poll_flags, 0);
1379 if (service_flags & BIT(VCHIQ_POLL_REMOVE)) {
1380 vchiq_log_info(vchiq_core_log_level, "%d: ps - remove %d<->%d",
1381 state->id, service->localport,
1382 service->remoteport);
1383
1384 /*
1385 * Make it look like a client, because
1386 * it must be removed and not left in
1387 * the LISTENING state.
1388 */
1389 service->public_fourcc = VCHIQ_FOURCC_INVALID;
1390
1391 if (vchiq_close_service_internal(service, NO_CLOSE_RECVD) !=
1392 VCHIQ_SUCCESS)
1393 request_poll(state, service, VCHIQ_POLL_REMOVE);
1394 } else if (service_flags & BIT(VCHIQ_POLL_TERMINATE)) {
1395 vchiq_log_info(vchiq_core_log_level, "%d: ps - terminate %d<->%d",
1396 state->id, service->localport, service->remoteport);
1397 if (vchiq_close_service_internal(service, NO_CLOSE_RECVD) != VCHIQ_SUCCESS)
1398 request_poll(state, service, VCHIQ_POLL_TERMINATE);
1399 }
1400 if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
1401 notify_bulks(service, &service->bulk_tx, RETRY_POLL);
1402 if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY))
1403 notify_bulks(service, &service->bulk_rx, RETRY_POLL);
1404 vchiq_service_put(service);
1405 }
1406 }
1407
1408 /* Called by the slot handler thread */
1409 static void
1410 poll_services(struct vchiq_state *state)
1411 {
1412 int group;
1413
1414 for (group = 0; group < BITSET_SIZE(state->unused_service); group++)
1415 poll_services_of_group(state, group);
1416 }
1417
1418 /* Called with the bulk_mutex held */
1419 static void
1420 abort_outstanding_bulks(struct vchiq_service *service,
1421 struct vchiq_bulk_queue *queue)
1422 {
1423 int is_tx = (queue == &service->bulk_tx);
1424
1425 vchiq_log_trace(vchiq_core_log_level, "%d: aob:%d %cx - li=%x ri=%x p=%x",
1426 service->state->id, service->localport, is_tx ? 't' : 'r',
1427 queue->local_insert, queue->remote_insert, queue->process);
1428
1429 WARN_ON((int)(queue->local_insert - queue->process) < 0);
1430 WARN_ON((int)(queue->remote_insert - queue->process) < 0);
1431
1432 while ((queue->process != queue->local_insert) ||
1433 (queue->process != queue->remote_insert)) {
1434 struct vchiq_bulk *bulk = &queue->bulks[BULK_INDEX(queue->process)];
1435
1436 if (queue->process == queue->remote_insert) {
1437 /* fabricate a matching dummy bulk */
1438 bulk->remote_data = NULL;
1439 bulk->remote_size = 0;
1440 queue->remote_insert++;
1441 }
1442
1443 if (queue->process != queue->local_insert) {
1444 vchiq_complete_bulk(bulk);
1445
1446 vchiq_log_info(SRVTRACE_LEVEL(service),
1447 "%s %c%c%c%c d:%d ABORTED - tx len:%d, rx len:%d",
1448 is_tx ? "Send Bulk to" : "Recv Bulk from",
1449 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1450 service->remoteport, bulk->size, bulk->remote_size);
1451 } else {
1452 /* fabricate a matching dummy bulk */
1453 bulk->data = 0;
1454 bulk->size = 0;
1455 bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
1456 bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
1457 VCHIQ_BULK_RECEIVE;
1458 queue->local_insert++;
1459 }
1460
1461 queue->process++;
1462 }
1463 }
1464
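/*
 * Handle an incoming OPEN request: locate a listening service for the
 * requested fourcc, check version compatibility, and answer with an
 * OPENACK (synchronous or normal), or with a CLOSE if no service can
 * accept it. Returns 1 when the message has been dealt with, 0 if the
 * caller should retry later.
 */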
1465 static int
1466 parse_open(struct vchiq_state *state, struct vchiq_header *header)
1467 {
1468 const struct vchiq_open_payload *payload;
1469 struct vchiq_service *service = NULL;
1470 int msgid, size;
1471 unsigned int localport, remoteport, fourcc;
1472 short version, version_min;
1473
1474 msgid = header->msgid;
1475 size = header->size;
1476 localport = VCHIQ_MSG_DSTPORT(msgid);
1477 remoteport = VCHIQ_MSG_SRCPORT(msgid);
1478 if (size < sizeof(struct vchiq_open_payload))
1479 goto fail_open;
1480
1481 payload = (struct vchiq_open_payload *)header->data;
1482 fourcc = payload->fourcc;
1483 vchiq_log_info(vchiq_core_log_level, "%d: prs OPEN@%pK (%d->'%c%c%c%c')",
1484 state->id, header, localport, VCHIQ_FOURCC_AS_4CHARS(fourcc));
1485
1486 service = get_listening_service(state, fourcc);
1487 if (!service)
1488 goto fail_open;
1489
1490 /* A matching service exists */
1491 version = payload->version;
1492 version_min = payload->version_min;
1493
1494 if ((service->version < version_min) || (version < service->version_min)) {
1495 /* Version mismatch */
1496 vchiq_loud_error_header();
1497 vchiq_loud_error("%d: service %d (%c%c%c%c) version mismatch - local (%d, min %d) vs. remote (%d, min %d)",
1498 state->id, service->localport, VCHIQ_FOURCC_AS_4CHARS(fourcc),
1499 service->version, service->version_min, version, version_min);
1500 vchiq_loud_error_footer();
1501 vchiq_service_put(service);
1502 service = NULL;
1503 goto fail_open;
1504 }
1505 service->peer_version = version;
1506
1507 if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
1508 struct vchiq_openack_payload ack_payload = {
1509 service->version
1510 };
1511 int openack_id = MAKE_OPENACK(service->localport, remoteport);
1512
1513 if (state->version_common <
1514 VCHIQ_VERSION_SYNCHRONOUS_MODE)
1515 service->sync = 0;
1516
1517 /* Acknowledge the OPEN */
1518 if (service->sync) {
1519 if (queue_message_sync(state, NULL, openack_id, memcpy_copy_callback,
1520 &ack_payload, sizeof(ack_payload), 0) == VCHIQ_RETRY)
1521 goto bail_not_ready;
1522
1523 /* The service is now open */
1524 set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC);
1525 } else {
1526 if (queue_message(state, NULL, openack_id, memcpy_copy_callback,
1527 &ack_payload, sizeof(ack_payload), 0) == VCHIQ_RETRY)
1528 goto bail_not_ready;
1529
1530 /* The service is now open */
1531 set_service_state(service, VCHIQ_SRVSTATE_OPEN);
1532 }
1533 }
1534
1535 /* Success - the message has been dealt with */
1536 vchiq_service_put(service);
1537 return 1;
1538
1539 fail_open:
1540 /* No available service, or an invalid request - send a CLOSE */
1541 if (queue_message(state, NULL, MAKE_CLOSE(0, VCHIQ_MSG_SRCPORT(msgid)),
1542 NULL, NULL, 0, 0) == VCHIQ_RETRY)
1543 goto bail_not_ready;
1544
1545 return 1;
1546
1547 bail_not_ready:
1548 if (service)
1549 vchiq_service_put(service);
1550
1551 return 0;
1552 }
1553
1554 /**
1555 * parse_message() - parses a single message from the rx slot
1556 * @state: vchiq state struct
1557 * @header: message header
1558 *
1559 * Context: Process context
1560 *
1561 * Return:
1562 * * >= 0 - size of the parsed message payload (without header)
1563 * * -EINVAL - fatal error occurred, bail out is required
1564 */
1565 static int
1566 parse_message(struct vchiq_state *state, struct vchiq_header *header)
1567 {
1568 struct vchiq_service *service = NULL;
1569 unsigned int localport, remoteport;
1570 int msgid, size, type, ret = -EINVAL;
1571
1572 DEBUG_INITIALISE(state->local);
1573
1574 DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
1575 msgid = header->msgid;
1576 DEBUG_VALUE(PARSE_MSGID, msgid);
1577 size = header->size;
1578 type = VCHIQ_MSG_TYPE(msgid);
1579 localport = VCHIQ_MSG_DSTPORT(msgid);
1580 remoteport = VCHIQ_MSG_SRCPORT(msgid);
1581
1582 if (type != VCHIQ_MSG_DATA)
1583 VCHIQ_STATS_INC(state, ctrl_rx_count);
1584
1585 switch (type) {
1586 case VCHIQ_MSG_OPENACK:
1587 case VCHIQ_MSG_CLOSE:
1588 case VCHIQ_MSG_DATA:
1589 case VCHIQ_MSG_BULK_RX:
1590 case VCHIQ_MSG_BULK_TX:
1591 case VCHIQ_MSG_BULK_RX_DONE:
1592 case VCHIQ_MSG_BULK_TX_DONE:
1593 service = find_service_by_port(state, localport);
1594 if ((!service ||
1595 ((service->remoteport != remoteport) &&
1596 (service->remoteport != VCHIQ_PORT_FREE))) &&
1597 (localport == 0) &&
1598 (type == VCHIQ_MSG_CLOSE)) {
1599 /*
1600 * This could be a CLOSE from a client which
1601 * hadn't yet received the OPENACK - look for
1602 * the connected service
1603 */
1604 if (service)
1605 vchiq_service_put(service);
1606 service = get_connected_service(state, remoteport);
1607 if (service)
1608 vchiq_log_warning(vchiq_core_log_level,
1609 "%d: prs %s@%pK (%d->%d) - found connected service %d",
1610 state->id, msg_type_str(type), header,
1611 remoteport, localport, service->localport);
1612 }
1613
1614 if (!service) {
1615 vchiq_log_error(vchiq_core_log_level,
1616 "%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
1617 state->id, msg_type_str(type), header, remoteport,
1618 localport, localport);
1619 goto skip_message;
1620 }
1621 break;
1622 default:
1623 break;
1624 }
1625
1626 if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
1627 int svc_fourcc;
1628
1629 svc_fourcc = service
1630 ? service->base.fourcc
1631 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1632 vchiq_log_info(SRVTRACE_LEVEL(service),
1633 "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d len:%d",
1634 msg_type_str(type), type, VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1635 remoteport, localport, size);
1636 if (size > 0)
1637 vchiq_log_dump_mem("Rcvd", 0, header->data, min(16, size));
1638 }
1639
1640 if (((unsigned long)header & VCHIQ_SLOT_MASK) +
1641 calc_stride(size) > VCHIQ_SLOT_SIZE) {
1642 vchiq_log_error(vchiq_core_log_level,
1643 "header %pK (msgid %x) - size %x too big for slot",
1644 header, (unsigned int)msgid, (unsigned int)size);
1645 WARN(1, "oversized for slot\n");
1646 }
1647
1648 switch (type) {
1649 case VCHIQ_MSG_OPEN:
1650 WARN_ON(VCHIQ_MSG_DSTPORT(msgid));
1651 if (!parse_open(state, header))
1652 goto bail_not_ready;
1653 break;
1654 case VCHIQ_MSG_OPENACK:
1655 if (size >= sizeof(struct vchiq_openack_payload)) {
1656 const struct vchiq_openack_payload *payload =
1657 (struct vchiq_openack_payload *)
1658 header->data;
1659 service->peer_version = payload->version;
1660 }
1661 vchiq_log_info(vchiq_core_log_level, "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
1662 state->id, header, size, remoteport, localport,
1663 service->peer_version);
1664 if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
1665 service->remoteport = remoteport;
1666 set_service_state(service, VCHIQ_SRVSTATE_OPEN);
1667 complete(&service->remove_event);
1668 } else {
1669 vchiq_log_error(vchiq_core_log_level, "OPENACK received in state %s",
1670 srvstate_names[service->srvstate]);
1671 }
1672 break;
1673 case VCHIQ_MSG_CLOSE:
1674 WARN_ON(size); /* There should be no data */
1675
1676 vchiq_log_info(vchiq_core_log_level, "%d: prs CLOSE@%pK (%d->%d)",
1677 state->id, header, remoteport, localport);
1678
1679 mark_service_closing_internal(service, 1);
1680
1681 if (vchiq_close_service_internal(service, CLOSE_RECVD) == VCHIQ_RETRY)
1682 goto bail_not_ready;
1683
1684 vchiq_log_info(vchiq_core_log_level, "Close Service %c%c%c%c s:%u d:%d",
1685 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1686 service->localport, service->remoteport);
1687 break;
1688 case VCHIQ_MSG_DATA:
1689 vchiq_log_info(vchiq_core_log_level, "%d: prs DATA@%pK,%x (%d->%d)",
1690 state->id, header, size, remoteport, localport);
1691
1692 if ((service->remoteport == remoteport) &&
1693 (service->srvstate == VCHIQ_SRVSTATE_OPEN)) {
1694 header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
1695 claim_slot(state->rx_info);
1696 DEBUG_TRACE(PARSE_LINE);
1697 if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
1698 NULL) == VCHIQ_RETRY) {
1699 DEBUG_TRACE(PARSE_LINE);
1700 goto bail_not_ready;
1701 }
1702 VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
1703 VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes, size);
1704 } else {
1705 VCHIQ_STATS_INC(state, error_count);
1706 }
1707 break;
1708 case VCHIQ_MSG_CONNECT:
1709 vchiq_log_info(vchiq_core_log_level, "%d: prs CONNECT@%pK", state->id, header);
1710 state->version_common = ((struct vchiq_slot_zero *)
1711 state->slot_data)->version;
1712 complete(&state->connect);
1713 break;
1714 case VCHIQ_MSG_BULK_RX:
1715 case VCHIQ_MSG_BULK_TX:
1716 /*
1717 * We should never receive a bulk request from the
1718 * other side since we're not setup to perform as the
1719 * master.
1720 */
1721 WARN_ON(1);
1722 break;
1723 case VCHIQ_MSG_BULK_RX_DONE:
1724 case VCHIQ_MSG_BULK_TX_DONE:
1725 if ((service->remoteport == remoteport) &&
1726 (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
1727 struct vchiq_bulk_queue *queue;
1728 struct vchiq_bulk *bulk;
1729
1730 queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
1731 &service->bulk_rx : &service->bulk_tx;
1732
1733 DEBUG_TRACE(PARSE_LINE);
1734 if (mutex_lock_killable(&service->bulk_mutex)) {
1735 DEBUG_TRACE(PARSE_LINE);
1736 goto bail_not_ready;
1737 }
1738 if ((int)(queue->remote_insert -
1739 queue->local_insert) >= 0) {
1740 vchiq_log_error(vchiq_core_log_level,
1741 "%d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)",
1742 state->id, msg_type_str(type), header, remoteport,
1743 localport, queue->remote_insert,
1744 queue->local_insert);
1745 mutex_unlock(&service->bulk_mutex);
1746 break;
1747 }
1748 if (queue->process != queue->remote_insert) {
1749 pr_err("%s: p %x != ri %x\n",
1750 __func__,
1751 queue->process,
1752 queue->remote_insert);
1753 mutex_unlock(&service->bulk_mutex);
1754 goto bail_not_ready;
1755 }
1756
1757 bulk = &queue->bulks[BULK_INDEX(queue->remote_insert)];
1758 bulk->actual = *(int *)header->data;
1759 queue->remote_insert++;
1760
1761 vchiq_log_info(vchiq_core_log_level, "%d: prs %s@%pK (%d->%d) %x@%pad",
1762 state->id, msg_type_str(type), header, remoteport, localport,
1763 bulk->actual, &bulk->data);
1764
1765 vchiq_log_trace(vchiq_core_log_level, "%d: prs:%d %cx li=%x ri=%x p=%x",
1766 state->id, localport,
1767 (type == VCHIQ_MSG_BULK_RX_DONE) ? 'r' : 't',
1768 queue->local_insert, queue->remote_insert, queue->process);
1769
1770 DEBUG_TRACE(PARSE_LINE);
1771 WARN_ON(queue->process == queue->local_insert);
1772 vchiq_complete_bulk(bulk);
1773 queue->process++;
1774 mutex_unlock(&service->bulk_mutex);
1775 DEBUG_TRACE(PARSE_LINE);
1776 notify_bulks(service, queue, RETRY_POLL);
1777 DEBUG_TRACE(PARSE_LINE);
1778 }
1779 break;
1780 case VCHIQ_MSG_PADDING:
1781 vchiq_log_trace(vchiq_core_log_level, "%d: prs PADDING@%pK,%x",
1782 state->id, header, size);
1783 break;
1784 case VCHIQ_MSG_PAUSE:
1785 /* If initiated, signal the application thread */
1786 vchiq_log_trace(vchiq_core_log_level, "%d: prs PAUSE@%pK,%x",
1787 state->id, header, size);
1788 if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
1789 vchiq_log_error(vchiq_core_log_level, "%d: PAUSE received in state PAUSED",
1790 state->id);
1791 break;
1792 }
1793 if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
1794 /* Send a PAUSE in response */
1795 if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
1796 QMFLAGS_NO_MUTEX_UNLOCK) == VCHIQ_RETRY)
1797 goto bail_not_ready;
1798 }
1799 /* At this point slot_mutex is held */
1800 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
1801 break;
1802 case VCHIQ_MSG_RESUME:
1803 vchiq_log_trace(vchiq_core_log_level, "%d: prs RESUME@%pK,%x",
1804 state->id, header, size);
1805 /* Release the slot mutex */
1806 mutex_unlock(&state->slot_mutex);
1807 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1808 break;
1809
1810 case VCHIQ_MSG_REMOTE_USE:
1811 vchiq_on_remote_use(state);
1812 break;
1813 case VCHIQ_MSG_REMOTE_RELEASE:
1814 vchiq_on_remote_release(state);
1815 break;
1816 case VCHIQ_MSG_REMOTE_USE_ACTIVE:
1817 break;
1818
1819 default:
1820 vchiq_log_error(vchiq_core_log_level, "%d: prs invalid msgid %x@%pK,%x",
1821 state->id, msgid, header, size);
1822 WARN(1, "invalid message\n");
1823 break;
1824 }
1825
1826 skip_message:
1827 ret = size;
1828
1829 bail_not_ready:
1830 if (service)
1831 vchiq_service_put(service);
1832
1833 return ret;
1834 }
1835
1836 /* Called by the slot handler thread */
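/*
 * Drain the receive slot queue: walk from state->rx_pos up to the remote's
 * tx_pos, mapping in each slot's data and info on a slot boundary, handing
 * every header to parse_message(), and releasing the extra slot reference
 * once the end of the slot is reached. A negative return from
 * parse_message() means the message could not be processed yet, so the walk
 * resumes on the next trigger.
 */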
1837 static void
1838 parse_rx_slots(struct vchiq_state *state)
1839 {
1840 struct vchiq_shared_state *remote = state->remote;
1841 int tx_pos;
1842
1843 DEBUG_INITIALISE(state->local);
1844
1845 tx_pos = remote->tx_pos;
1846
1847 while (state->rx_pos != tx_pos) {
1848 struct vchiq_header *header;
1849 int size;
1850
1851 DEBUG_TRACE(PARSE_LINE);
1852 if (!state->rx_data) {
1853 int rx_index;
1854
1855 WARN_ON(state->rx_pos & VCHIQ_SLOT_MASK);
1856 rx_index = remote->slot_queue[
1857 SLOT_QUEUE_INDEX_FROM_POS_MASKED(state->rx_pos)];
1858 state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
1859 rx_index);
1860 state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
1861
1862 /*
1863 * Initialise use_count to one, and increment
1864 * release_count at the end of the slot to avoid
1865 * releasing the slot prematurely.
1866 */
1867 state->rx_info->use_count = 1;
1868 state->rx_info->release_count = 0;
1869 }
1870
1871 header = (struct vchiq_header *)(state->rx_data +
1872 (state->rx_pos & VCHIQ_SLOT_MASK));
1873 size = parse_message(state, header);
1874 if (size < 0)
1875 return;
1876
1877 state->rx_pos += calc_stride(size);
1878
1879 DEBUG_TRACE(PARSE_LINE);
1880 /*
1881 * Perform some housekeeping when the end of the slot is
1882 * reached.
1883 */
1884 if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
1885 /* Remove the extra reference count. */
1886 release_slot(state, state->rx_info, NULL, NULL);
1887 state->rx_data = NULL;
1888 }
1889 }
1890 }
1891
1892 /**
1893 * handle_poll() - handle service polling and other rare conditions
1894 * @state: vchiq state struct
1895 *
1896 * Context: Process context
1897 *
1898 * Return:
1899 * 0 - poll handled successfully
1900 * * -EAGAIN - retry later
1901 */
1902 static int
1903 handle_poll(struct vchiq_state *state)
1904 {
1905 switch (state->conn_state) {
1906 case VCHIQ_CONNSTATE_CONNECTED:
1907 /* Poll the services as requested */
1908 poll_services(state);
1909 break;
1910
1911 case VCHIQ_CONNSTATE_PAUSING:
1912 if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
1913 QMFLAGS_NO_MUTEX_UNLOCK) != VCHIQ_RETRY) {
1914 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSE_SENT);
1915 } else {
1916 /* Retry later */
1917 return -EAGAIN;
1918 }
1919 break;
1920
1921 case VCHIQ_CONNSTATE_RESUMING:
1922 if (queue_message(state, NULL, MAKE_RESUME, NULL, NULL, 0,
1923 QMFLAGS_NO_MUTEX_LOCK) != VCHIQ_RETRY) {
1924 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1925 } else {
1926 /*
1927 * This should really be impossible,
1928 * since the PAUSE should have flushed
1929 * through outstanding messages.
1930 */
1931 vchiq_log_error(vchiq_core_log_level, "Failed to send RESUME message");
1932 }
1933 break;
1934 default:
1935 break;
1936 }
1937
1938 return 0;
1939 }
1940
1941 /* Called by the slot handler thread */
1942 static int
1943 slot_handler_func(void *v)
1944 {
1945 struct vchiq_state *state = v;
1946 struct vchiq_shared_state *local = state->local;
1947
1948 DEBUG_INITIALISE(local);
1949
1950 while (1) {
1951 DEBUG_COUNT(SLOT_HANDLER_COUNT);
1952 DEBUG_TRACE(SLOT_HANDLER_LINE);
1953 remote_event_wait(&state->trigger_event, &local->trigger);
1954
1955 rmb();
1956
1957 DEBUG_TRACE(SLOT_HANDLER_LINE);
1958 if (state->poll_needed) {
1959 state->poll_needed = 0;
1960
1961 /*
1962 * Handle service polling and other rare conditions here
1963 * out of the mainline code
1964 */
1965 if (handle_poll(state) == -EAGAIN)
1966 state->poll_needed = 1;
1967 }
1968
1969 DEBUG_TRACE(SLOT_HANDLER_LINE);
1970 parse_rx_slots(state);
1971 }
1972 return 0;
1973 }
1974
1975 /* Called by the recycle thread */
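/*
 * Loop forever servicing the slot recycle queue: each time the remote
 * signals the recycle event, process the freed slots and release the
 * services' claims on them. 'found' is a scratch bitmap sized for
 * VCHIQ_MAX_SERVICES.
 */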
1976 static int
1977 recycle_func(void *v)
1978 {
1979 struct vchiq_state *state = v;
1980 struct vchiq_shared_state *local = state->local;
1981 u32 *found;
1982 size_t length;
1983
1984 length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
1985
1986 found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
1987 GFP_KERNEL);
1988 if (!found)
1989 return -ENOMEM;
1990
1991 while (1) {
1992 remote_event_wait(&state->recycle_event, &local->recycle);
1993
1994 process_free_queue(state, found, length);
1995 }
1996 return 0;
1997 }
1998
1999 /* Called by the sync thread */
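/*
 * Service the dedicated synchronous slot (state->remote->slot_sync). Only
 * OPENACK and DATA messages are expected here; anything else is logged and
 * the slot is handed back to the remote via release_message_sync().
 */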
2000 static int
2001 sync_func(void *v)
2002 {
2003 struct vchiq_state *state = v;
2004 struct vchiq_shared_state *local = state->local;
2005 struct vchiq_header *header =
2006 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2007 state->remote->slot_sync);
2008
2009 while (1) {
2010 struct vchiq_service *service;
2011 int msgid, size;
2012 int type;
2013 unsigned int localport, remoteport;
2014
2015 remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
2016
2017 rmb();
2018
2019 msgid = header->msgid;
2020 size = header->size;
2021 type = VCHIQ_MSG_TYPE(msgid);
2022 localport = VCHIQ_MSG_DSTPORT(msgid);
2023 remoteport = VCHIQ_MSG_SRCPORT(msgid);
2024
2025 service = find_service_by_port(state, localport);
2026
2027 if (!service) {
2028 vchiq_log_error(vchiq_sync_log_level,
2029 "%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
2030 state->id, msg_type_str(type), header,
2031 remoteport, localport, localport);
2032 release_message_sync(state, header);
2033 continue;
2034 }
2035
2036 if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
2037 int svc_fourcc;
2038
2039 svc_fourcc = service
2040 ? service->base.fourcc
2041 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
2042 vchiq_log_trace(vchiq_sync_log_level,
2043 "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
2044 msg_type_str(type), VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
2045 remoteport, localport, size);
2046 if (size > 0)
2047 vchiq_log_dump_mem("Rcvd", 0, header->data, min(16, size));
2048 }
2049
2050 switch (type) {
2051 case VCHIQ_MSG_OPENACK:
2052 if (size >= sizeof(struct vchiq_openack_payload)) {
2053 const struct vchiq_openack_payload *payload =
2054 (struct vchiq_openack_payload *)
2055 header->data;
2056 service->peer_version = payload->version;
2057 }
2058 vchiq_log_info(vchiq_sync_log_level, "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
2059 state->id, header, size, remoteport, localport,
2060 service->peer_version);
2061 if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
2062 service->remoteport = remoteport;
2063 set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC);
2064 service->sync = 1;
2065 complete(&service->remove_event);
2066 }
2067 release_message_sync(state, header);
2068 break;
2069
2070 case VCHIQ_MSG_DATA:
2071 vchiq_log_trace(vchiq_sync_log_level, "%d: sf DATA@%pK,%x (%d->%d)",
2072 state->id, header, size, remoteport, localport);
2073
2074 if ((service->remoteport == remoteport) &&
2075 (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)) {
2076 if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
2077 NULL) == VCHIQ_RETRY)
2078 vchiq_log_error(vchiq_sync_log_level,
2079 "synchronous callback to service %d returns VCHIQ_RETRY",
2080 localport);
2081 }
2082 break;
2083
2084 default:
2085 vchiq_log_error(vchiq_sync_log_level, "%d: sf unexpected msgid %x@%pK,%x",
2086 state->id, msgid, header, size);
2087 release_message_sync(state, header);
2088 break;
2089 }
2090
2091 vchiq_service_put(service);
2092 }
2093
2094 return 0;
2095 }
2096
2097 inline const char *
2098 get_conn_state_name(enum vchiq_connstate conn_state)
2099 {
2100 return conn_state_names[conn_state];
2101 }
2102
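/*
 * Carve the shared memory region into slots. The region is first aligned
 * to a slot boundary; slot zero (the shared control block) occupies the
 * first VCHIQ_SLOT_ZERO_SLOTS slots, and the remaining data slots are split
 * evenly between the two sides, each side getting one synchronous slot
 * followed by its run of ordinary slots.
 */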
2103 struct vchiq_slot_zero *
2104 vchiq_init_slots(void *mem_base, int mem_size)
2105 {
2106 int mem_align =
2107 (int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
2108 struct vchiq_slot_zero *slot_zero =
2109 (struct vchiq_slot_zero *)(mem_base + mem_align);
2110 int num_slots = (mem_size - mem_align) / VCHIQ_SLOT_SIZE;
2111 int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
2112
2113 check_sizes();
2114
2115 /* Ensure there is enough memory to run an absolutely minimum system */
2116 num_slots -= first_data_slot;
2117
2118 if (num_slots < 4) {
2119 vchiq_log_error(vchiq_core_log_level, "%s - insufficient memory %x bytes",
2120 __func__, mem_size);
2121 return NULL;
2122 }
2123
2124 memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));
2125
2126 slot_zero->magic = VCHIQ_MAGIC;
2127 slot_zero->version = VCHIQ_VERSION;
2128 slot_zero->version_min = VCHIQ_VERSION_MIN;
2129 slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
2130 slot_zero->slot_size = VCHIQ_SLOT_SIZE;
2131 slot_zero->max_slots = VCHIQ_MAX_SLOTS;
2132 slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
2133
2134 slot_zero->master.slot_sync = first_data_slot;
2135 slot_zero->master.slot_first = first_data_slot + 1;
2136 slot_zero->master.slot_last = first_data_slot + (num_slots / 2) - 1;
2137 slot_zero->slave.slot_sync = first_data_slot + (num_slots / 2);
2138 slot_zero->slave.slot_first = first_data_slot + (num_slots / 2) + 1;
2139 slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
2140
2141 return slot_zero;
2142 }
2143
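/*
 * One-time state initialisation for the (single) VCHIQ instance: wire up
 * the local/remote shared-state pointers, initialise the events, mutexes
 * and per-service quotas, populate the local slot queue, and start the
 * slot-handler, recycle and sync kthreads before advertising readiness via
 * local->initialised.
 */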
2144 int
2145 vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
2146 {
2147 struct vchiq_shared_state *local;
2148 struct vchiq_shared_state *remote;
2149 char threadname[16];
2150 int i, ret;
2151
2152 if (vchiq_states[0]) {
2153 pr_err("%s: VCHIQ state already initialized\n", __func__);
2154 return -EINVAL;
2155 }
2156
2157 local = &slot_zero->slave;
2158 remote = &slot_zero->master;
2159
2160 if (local->initialised) {
2161 vchiq_loud_error_header();
2162 if (remote->initialised)
2163 vchiq_loud_error("local state has already been initialised");
2164 else
2165 vchiq_loud_error("master/slave mismatch two slaves");
2166 vchiq_loud_error_footer();
2167 return -EINVAL;
2168 }
2169
2170 memset(state, 0, sizeof(struct vchiq_state));
2171
2172 /*
2173 * initialize shared state pointers
2174 */
2175
2176 state->local = local;
2177 state->remote = remote;
2178 state->slot_data = (struct vchiq_slot *)slot_zero;
2179
2180 /*
2181 * initialize events and mutexes
2182 */
2183
2184 init_completion(&state->connect);
2185 mutex_init(&state->mutex);
2186 mutex_init(&state->slot_mutex);
2187 mutex_init(&state->recycle_mutex);
2188 mutex_init(&state->sync_mutex);
2189 mutex_init(&state->bulk_transfer_mutex);
2190
2191 init_completion(&state->slot_available_event);
2192 init_completion(&state->slot_remove_event);
2193 init_completion(&state->data_quota_event);
2194
2195 state->slot_queue_available = 0;
2196
2197 for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
2198 struct vchiq_service_quota *quota = &state->service_quotas[i];
2199 init_completion(&quota->quota_event);
2200 }
2201
2202 for (i = local->slot_first; i <= local->slot_last; i++) {
2203 local->slot_queue[state->slot_queue_available] = i;
2204 state->slot_queue_available++;
2205 complete(&state->slot_available_event);
2206 }
2207
2208 state->default_slot_quota = state->slot_queue_available / 2;
2209 state->default_message_quota =
2210 min_t(unsigned short, state->default_slot_quota * 256, ~0);
2211
2212 state->previous_data_index = -1;
2213 state->data_use_count = 0;
2214 state->data_quota = state->slot_queue_available - 1;
2215
2216 remote_event_create(&state->trigger_event, &local->trigger);
2217 local->tx_pos = 0;
2218 remote_event_create(&state->recycle_event, &local->recycle);
2219 local->slot_queue_recycle = state->slot_queue_available;
2220 remote_event_create(&state->sync_trigger_event, &local->sync_trigger);
2221 remote_event_create(&state->sync_release_event, &local->sync_release);
2222
2223 /* At start-of-day, the slot is empty and available */
2224 ((struct vchiq_header *)
2225 SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid =
2226 VCHIQ_MSGID_PADDING;
2227 remote_event_signal_local(&state->sync_release_event, &local->sync_release);
2228
2229 local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
2230
2231 ret = vchiq_platform_init_state(state);
2232 if (ret)
2233 return ret;
2234
2235 /*
2236 * bring up slot handler thread
2237 */
2238 snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
2239 state->slot_handler_thread = kthread_create(&slot_handler_func, (void *)state, threadname);
2240
2241 if (IS_ERR(state->slot_handler_thread)) {
2242 vchiq_loud_error_header();
2243 vchiq_loud_error("couldn't create thread %s", threadname);
2244 vchiq_loud_error_footer();
2245 return PTR_ERR(state->slot_handler_thread);
2246 }
2247 set_user_nice(state->slot_handler_thread, -19);
2248
2249 snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
2250 state->recycle_thread = kthread_create(&recycle_func, (void *)state, threadname);
2251 if (IS_ERR(state->recycle_thread)) {
2252 vchiq_loud_error_header();
2253 vchiq_loud_error("couldn't create thread %s", threadname);
2254 vchiq_loud_error_footer();
2255 ret = PTR_ERR(state->recycle_thread);
2256 goto fail_free_handler_thread;
2257 }
2258 set_user_nice(state->recycle_thread, -19);
2259
2260 snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
2261 state->sync_thread = kthread_create(&sync_func, (void *)state, threadname);
2262 if (IS_ERR(state->sync_thread)) {
2263 vchiq_loud_error_header();
2264 vchiq_loud_error("couldn't create thread %s", threadname);
2265 vchiq_loud_error_footer();
2266 ret = PTR_ERR(state->sync_thread);
2267 goto fail_free_recycle_thread;
2268 }
2269 set_user_nice(state->sync_thread, -20);
2270
2271 wake_up_process(state->slot_handler_thread);
2272 wake_up_process(state->recycle_thread);
2273 wake_up_process(state->sync_thread);
2274
2275 vchiq_states[0] = state;
2276
2277 /* Indicate readiness to the other side */
2278 local->initialised = 1;
2279
2280 return 0;
2281
2282 fail_free_recycle_thread:
2283 kthread_stop(state->recycle_thread);
2284 fail_free_handler_thread:
2285 kthread_stop(state->slot_handler_thread);
2286
2287 return ret;
2288 }
2289
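/*
 * Append a held message header to the service's receive ring. The ring has
 * VCHIQ_MAX_SLOTS entries addressed by free-running read/write counters
 * (masked with VCHIQ_MAX_SLOTS - 1); when it is full the caller sleeps on
 * msg_queue_pop until vchiq_msg_hold() consumes an entry.
 */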
2290 void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header)
2291 {
2292 struct vchiq_service *service = find_service_by_handle(handle);
2293 int pos;
2294
2295 if (!service)
2296 return;
2297
2298 while (service->msg_queue_write == service->msg_queue_read +
2299 VCHIQ_MAX_SLOTS) {
2300 if (wait_for_completion_interruptible(&service->msg_queue_pop))
2301 flush_signals(current);
2302 }
2303
2304 pos = service->msg_queue_write & (VCHIQ_MAX_SLOTS - 1);
2305 service->msg_queue_write++;
2306 service->msg_queue[pos] = header;
2307
2308 complete(&service->msg_queue_push);
2309 }
2310 EXPORT_SYMBOL(vchiq_msg_queue_push);
2311
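/*
 * Pop the oldest held header from the service's receive ring, or return
 * NULL if the ring is empty (note that the early return means the wait
 * loop below is effectively never entered). The header remains claimed
 * until the caller hands it back with vchiq_release_message().
 *
 * A kernel client might consume messages roughly like this (illustrative
 * sketch only; 'handle' is assumed to come from an earlier service open and
 * process_payload() is a hypothetical helper):
 *
 *	struct vchiq_header *header = vchiq_msg_hold(handle);
 *
 *	if (header) {
 *		process_payload(header->data, header->size);
 *		vchiq_release_message(handle, header);
 *	}
 */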
2312 struct vchiq_header *vchiq_msg_hold(unsigned int handle)
2313 {
2314 struct vchiq_service *service = find_service_by_handle(handle);
2315 struct vchiq_header *header;
2316 int pos;
2317
2318 if (!service)
2319 return NULL;
2320
2321 if (service->msg_queue_write == service->msg_queue_read)
2322 return NULL;
2323
2324 while (service->msg_queue_write == service->msg_queue_read) {
2325 if (wait_for_completion_interruptible(&service->msg_queue_push))
2326 flush_signals(current);
2327 }
2328
2329 pos = service->msg_queue_read & (VCHIQ_MAX_SLOTS - 1);
2330 service->msg_queue_read++;
2331 header = service->msg_queue[pos];
2332
2333 complete(&service->msg_queue_pop);
2334
2335 return header;
2336 }
2337 EXPORT_SYMBOL(vchiq_msg_hold);
2338
2339 static int vchiq_validate_params(const struct vchiq_service_params_kernel *params)
2340 {
2341 if (!params->callback || !params->fourcc) {
2342 vchiq_loud_error("Can't add service, invalid params\n");
2343 return -EINVAL;
2344 }
2345
2346 return 0;
2347 }
2348
2349 /* Called from application thread when a client or server service is created. */
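/*
 * Port allocation: a client being opened (VCHIQ_SRVSTATE_OPENING) takes the
 * lowest free entry in state->services, while a server scans from the top
 * and gives up if another server is already registered on the same fourcc
 * with a different instance or callback. The returned handle packs a
 * rolling sequence number, the state id and the local port.
 */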
2350 struct vchiq_service *
2351 vchiq_add_service_internal(struct vchiq_state *state,
2352 const struct vchiq_service_params_kernel *params,
2353 int srvstate, struct vchiq_instance *instance,
2354 void (*userdata_term)(void *userdata))
2355 {
2356 struct vchiq_service *service;
2357 struct vchiq_service __rcu **pservice = NULL;
2358 struct vchiq_service_quota *quota;
2359 int ret;
2360 int i;
2361
2362 ret = vchiq_validate_params(params);
2363 if (ret)
2364 return NULL;
2365
2366 service = kzalloc(sizeof(*service), GFP_KERNEL);
2367 if (!service)
2368 return service;
2369
2370 service->base.fourcc = params->fourcc;
2371 service->base.callback = params->callback;
2372 service->base.userdata = params->userdata;
2373 service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
2374 kref_init(&service->ref_count);
2375 service->srvstate = VCHIQ_SRVSTATE_FREE;
2376 service->userdata_term = userdata_term;
2377 service->localport = VCHIQ_PORT_FREE;
2378 service->remoteport = VCHIQ_PORT_FREE;
2379
2380 service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
2381 VCHIQ_FOURCC_INVALID : params->fourcc;
2382 service->auto_close = 1;
2383 atomic_set(&service->poll_flags, 0);
2384 service->version = params->version;
2385 service->version_min = params->version_min;
2386 service->state = state;
2387 service->instance = instance;
2388 init_completion(&service->remove_event);
2389 init_completion(&service->bulk_remove_event);
2390 init_completion(&service->msg_queue_pop);
2391 init_completion(&service->msg_queue_push);
2392 mutex_init(&service->bulk_mutex);
2393
2394 /*
2395 * Although it is perfectly possible to use a spinlock
2396 * to protect the creation of services, it is overkill as it
2397 * disables interrupts while the array is searched.
2398 * The only danger is of another thread trying to create a
2399 * service - service deletion is safe.
2400 * Therefore it is preferable to use state->mutex which,
2401 * although slower to claim, doesn't block interrupts while
2402 * it is held.
2403 */
2404
2405 mutex_lock(&state->mutex);
2406
2407 /* Prepare to use a previously unused service */
2408 if (state->unused_service < VCHIQ_MAX_SERVICES)
2409 pservice = &state->services[state->unused_service];
2410
2411 if (srvstate == VCHIQ_SRVSTATE_OPENING) {
2412 for (i = 0; i < state->unused_service; i++) {
2413 if (!rcu_access_pointer(state->services[i])) {
2414 pservice = &state->services[i];
2415 break;
2416 }
2417 }
2418 } else {
2419 rcu_read_lock();
2420 for (i = (state->unused_service - 1); i >= 0; i--) {
2421 struct vchiq_service *srv;
2422
2423 srv = rcu_dereference(state->services[i]);
2424 if (!srv) {
2425 pservice = &state->services[i];
2426 } else if ((srv->public_fourcc == params->fourcc) &&
2427 ((srv->instance != instance) ||
2428 (srv->base.callback != params->callback))) {
2429 /*
2430 * There is another server using this
2431 * fourcc which doesn't match.
2432 */
2433 pservice = NULL;
2434 break;
2435 }
2436 }
2437 rcu_read_unlock();
2438 }
2439
2440 if (pservice) {
2441 service->localport = (pservice - state->services);
2442 if (!handle_seq)
2443 handle_seq = VCHIQ_MAX_STATES *
2444 VCHIQ_MAX_SERVICES;
2445 service->handle = handle_seq |
2446 (state->id * VCHIQ_MAX_SERVICES) |
2447 service->localport;
2448 handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
2449 rcu_assign_pointer(*pservice, service);
2450 if (pservice == &state->services[state->unused_service])
2451 state->unused_service++;
2452 }
2453
2454 mutex_unlock(&state->mutex);
2455
2456 if (!pservice) {
2457 kfree(service);
2458 return NULL;
2459 }
2460
2461 quota = &state->service_quotas[service->localport];
2462 quota->slot_quota = state->default_slot_quota;
2463 quota->message_quota = state->default_message_quota;
2464 if (quota->slot_use_count == 0)
2465 quota->previous_tx_index =
2466 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
2467 - 1;
2468
2469 /* Bring this service online */
2470 set_service_state(service, srvstate);
2471
2472 vchiq_log_info(vchiq_core_msg_log_level, "%s Service %c%c%c%c SrcPort:%d",
2473 (srvstate == VCHIQ_SRVSTATE_OPENING) ? "Open" : "Add",
2474 VCHIQ_FOURCC_AS_4CHARS(params->fourcc), service->localport);
2475
2476 /* Don't unlock the service - leave it with a ref_count of 1. */
2477
2478 return service;
2479 }
2480
2481 enum vchiq_status
2482 vchiq_open_service_internal(struct vchiq_service *service, int client_id)
2483 {
2484 struct vchiq_open_payload payload = {
2485 service->base.fourcc,
2486 client_id,
2487 service->version,
2488 service->version_min
2489 };
2490 enum vchiq_status status = VCHIQ_SUCCESS;
2491
2492 service->client_id = client_id;
2493 vchiq_use_service_internal(service);
2494 status = queue_message(service->state,
2495 NULL, MAKE_OPEN(service->localport),
2496 memcpy_copy_callback,
2497 &payload,
2498 sizeof(payload),
2499 QMFLAGS_IS_BLOCKING);
2500
2501 if (status != VCHIQ_SUCCESS)
2502 return status;
2503
2504 /* Wait for the ACK/NAK */
2505 if (wait_for_completion_interruptible(&service->remove_event)) {
2506 status = VCHIQ_RETRY;
2507 vchiq_release_service_internal(service);
2508 } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
2509 (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
2510 if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
2511 vchiq_log_error(vchiq_core_log_level,
2512 "%d: osi - srvstate = %s (ref %u)",
2513 service->state->id,
2514 srvstate_names[service->srvstate],
2515 kref_read(&service->ref_count));
2516 status = VCHIQ_ERROR;
2517 VCHIQ_SERVICE_STATS_INC(service, error_count);
2518 vchiq_release_service_internal(service);
2519 }
2520
2521 return status;
2522 }
2523
2524 static void
2525 release_service_messages(struct vchiq_service *service)
2526 {
2527 struct vchiq_state *state = service->state;
2528 int slot_last = state->remote->slot_last;
2529 int i;
2530
2531 /* Release any claimed messages aimed at this service */
2532
2533 if (service->sync) {
2534 struct vchiq_header *header =
2535 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2536 state->remote->slot_sync);
2537 if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
2538 release_message_sync(state, header);
2539
2540 return;
2541 }
2542
2543 for (i = state->remote->slot_first; i <= slot_last; i++) {
2544 struct vchiq_slot_info *slot_info =
2545 SLOT_INFO_FROM_INDEX(state, i);
2546 unsigned int pos, end;
2547 char *data;
2548
2549 if (slot_info->release_count == slot_info->use_count)
2550 continue;
2551
2552 data = (char *)SLOT_DATA_FROM_INDEX(state, i);
2553 end = VCHIQ_SLOT_SIZE;
2554 if (data == state->rx_data)
2555 /*
2556 * This buffer is still being read from - stop
2557 * at the current read position
2558 */
2559 end = state->rx_pos & VCHIQ_SLOT_MASK;
2560
2561 pos = 0;
2562
2563 while (pos < end) {
2564 struct vchiq_header *header =
2565 (struct vchiq_header *)(data + pos);
2566 int msgid = header->msgid;
2567 int port = VCHIQ_MSG_DSTPORT(msgid);
2568
2569 if ((port == service->localport) && (msgid & VCHIQ_MSGID_CLAIMED)) {
2570 vchiq_log_info(vchiq_core_log_level, " fsi - hdr %pK", header);
2571 release_slot(state, slot_info, header, NULL);
2572 }
2573 pos += calc_stride(header->size);
2574 if (pos > VCHIQ_SLOT_SIZE) {
2575 vchiq_log_error(vchiq_core_log_level,
2576 "fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
2577 pos, header, msgid, header->msgid, header->size);
2578 WARN(1, "invalid slot position\n");
2579 }
2580 }
2581 }
2582 }
2583
2584 static int
2585 do_abort_bulks(struct vchiq_service *service)
2586 {
2587 enum vchiq_status status;
2588
2589 /* Abort any outstanding bulk transfers */
2590 if (mutex_lock_killable(&service->bulk_mutex))
2591 return 0;
2592 abort_outstanding_bulks(service, &service->bulk_tx);
2593 abort_outstanding_bulks(service, &service->bulk_rx);
2594 mutex_unlock(&service->bulk_mutex);
2595
2596 status = notify_bulks(service, &service->bulk_tx, NO_RETRY_POLL);
2597 if (status != VCHIQ_SUCCESS)
2598 return 0;
2599
2600 status = notify_bulks(service, &service->bulk_rx, NO_RETRY_POLL);
2601 return (status == VCHIQ_SUCCESS);
2602 }
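/*
 * Final stage of closing a service: move it to LISTENING (auto-close
 * server), CLOSEWAIT or CLOSED, deliver VCHIQ_SERVICE_CLOSED to the owner's
 * callback and drop any outstanding use counts. If the callback returns
 * VCHIQ_RETRY, the service is parked in 'failstate' so the close can be
 * retried later.
 */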
2603
2604 static enum vchiq_status
2605 close_service_complete(struct vchiq_service *service, int failstate)
2606 {
2607 enum vchiq_status status;
2608 int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2609 int newstate;
2610
2611 switch (service->srvstate) {
2612 case VCHIQ_SRVSTATE_OPEN:
2613 case VCHIQ_SRVSTATE_CLOSESENT:
2614 case VCHIQ_SRVSTATE_CLOSERECVD:
2615 if (is_server) {
2616 if (service->auto_close) {
2617 service->client_id = 0;
2618 service->remoteport = VCHIQ_PORT_FREE;
2619 newstate = VCHIQ_SRVSTATE_LISTENING;
2620 } else {
2621 newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
2622 }
2623 } else {
2624 newstate = VCHIQ_SRVSTATE_CLOSED;
2625 }
2626 set_service_state(service, newstate);
2627 break;
2628 case VCHIQ_SRVSTATE_LISTENING:
2629 break;
2630 default:
2631 vchiq_log_error(vchiq_core_log_level, "%s(%x) called in state %s", __func__,
2632 service->handle, srvstate_names[service->srvstate]);
2633 WARN(1, "%s in unexpected state\n", __func__);
2634 return VCHIQ_ERROR;
2635 }
2636
2637 status = make_service_callback(service, VCHIQ_SERVICE_CLOSED, NULL, NULL);
2638
2639 if (status != VCHIQ_RETRY) {
2640 int uc = service->service_use_count;
2641 int i;
2642 /* Complete the close process */
2643 for (i = 0; i < uc; i++)
2644 /*
2645 * cater for cases where close is forced and the
2646 * client may not close all its handles
2647 */
2648 vchiq_release_service_internal(service);
2649
2650 service->client_id = 0;
2651 service->remoteport = VCHIQ_PORT_FREE;
2652
2653 if (service->srvstate == VCHIQ_SRVSTATE_CLOSED) {
2654 vchiq_free_service_internal(service);
2655 } else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
2656 if (is_server)
2657 service->closing = 0;
2658
2659 complete(&service->remove_event);
2660 }
2661 } else {
2662 set_service_state(service, failstate);
2663 }
2664
2665 return status;
2666 }
2667
2668 /* Called by the slot handler */
2669 enum vchiq_status
2670 vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
2671 {
2672 struct vchiq_state *state = service->state;
2673 enum vchiq_status status = VCHIQ_SUCCESS;
2674 int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2675 int close_id = MAKE_CLOSE(service->localport,
2676 VCHIQ_MSG_DSTPORT(service->remoteport));
2677
2678 vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)", service->state->id,
2679 service->localport, close_recvd, srvstate_names[service->srvstate]);
2680
2681 switch (service->srvstate) {
2682 case VCHIQ_SRVSTATE_CLOSED:
2683 case VCHIQ_SRVSTATE_HIDDEN:
2684 case VCHIQ_SRVSTATE_LISTENING:
2685 case VCHIQ_SRVSTATE_CLOSEWAIT:
2686 if (close_recvd) {
2687 vchiq_log_error(vchiq_core_log_level, "%s(1) called in state %s",
2688 __func__, srvstate_names[service->srvstate]);
2689 } else if (is_server) {
2690 if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
2691 status = VCHIQ_ERROR;
2692 } else {
2693 service->client_id = 0;
2694 service->remoteport = VCHIQ_PORT_FREE;
2695 if (service->srvstate == VCHIQ_SRVSTATE_CLOSEWAIT)
2696 set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
2697 }
2698 complete(&service->remove_event);
2699 } else {
2700 vchiq_free_service_internal(service);
2701 }
2702 break;
2703 case VCHIQ_SRVSTATE_OPENING:
2704 if (close_recvd) {
2705 /* The open was rejected - tell the user */
2706 set_service_state(service, VCHIQ_SRVSTATE_CLOSEWAIT);
2707 complete(&service->remove_event);
2708 } else {
2709 /* Shutdown mid-open - let the other side know */
2710 status = queue_message(state, service, close_id, NULL, NULL, 0, 0);
2711 }
2712 break;
2713
2714 case VCHIQ_SRVSTATE_OPENSYNC:
2715 mutex_lock(&state->sync_mutex);
2716 fallthrough;
2717 case VCHIQ_SRVSTATE_OPEN:
2718 if (close_recvd) {
2719 if (!do_abort_bulks(service))
2720 status = VCHIQ_RETRY;
2721 }
2722
2723 release_service_messages(service);
2724
2725 if (status == VCHIQ_SUCCESS)
2726 status = queue_message(state, service, close_id, NULL,
2727 NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
2728
2729 if (status != VCHIQ_SUCCESS) {
2730 if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)
2731 mutex_unlock(&state->sync_mutex);
2732 break;
2733 }
2734
2735 if (!close_recvd) {
2736 /* Change the state while the mutex is still held */
2737 set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
2738 mutex_unlock(&state->slot_mutex);
2739 if (service->sync)
2740 mutex_unlock(&state->sync_mutex);
2741 break;
2742 }
2743
2744 /* Change the state while the mutex is still held */
2745 set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
2746 mutex_unlock(&state->slot_mutex);
2747 if (service->sync)
2748 mutex_unlock(&state->sync_mutex);
2749
2750 status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
2751 break;
2752
2753 case VCHIQ_SRVSTATE_CLOSESENT:
2754 if (!close_recvd)
2755 /* This happens when a process is killed mid-close */
2756 break;
2757
2758 if (!do_abort_bulks(service)) {
2759 status = VCHIQ_RETRY;
2760 break;
2761 }
2762
2763 if (status == VCHIQ_SUCCESS)
2764 status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
2765 break;
2766
2767 case VCHIQ_SRVSTATE_CLOSERECVD:
2768 if (!close_recvd && is_server)
2769 /* Force into LISTENING mode */
2770 set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
2771 status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
2772 break;
2773
2774 default:
2775 vchiq_log_error(vchiq_core_log_level, "%s(%d) called in state %s", __func__,
2776 close_recvd, srvstate_names[service->srvstate]);
2777 break;
2778 }
2779
2780 return status;
2781 }
2782
2783 /* Called from the application process upon process death */
2784 void
2785 vchiq_terminate_service_internal(struct vchiq_service *service)
2786 {
2787 struct vchiq_state *state = service->state;
2788
2789 vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)", state->id,
2790 service->localport, service->remoteport);
2791
2792 mark_service_closing(service);
2793
2794 /* Mark the service for removal by the slot handler */
2795 request_poll(state, service, VCHIQ_POLL_REMOVE);
2796 }
2797
2798 /* Called from the slot handler */
2799 void
2800 vchiq_free_service_internal(struct vchiq_service *service)
2801 {
2802 struct vchiq_state *state = service->state;
2803
2804 vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)", state->id, service->localport);
2805
2806 switch (service->srvstate) {
2807 case VCHIQ_SRVSTATE_OPENING:
2808 case VCHIQ_SRVSTATE_CLOSED:
2809 case VCHIQ_SRVSTATE_HIDDEN:
2810 case VCHIQ_SRVSTATE_LISTENING:
2811 case VCHIQ_SRVSTATE_CLOSEWAIT:
2812 break;
2813 default:
2814 vchiq_log_error(vchiq_core_log_level, "%d: fsi - (%d) in state %s", state->id,
2815 service->localport, srvstate_names[service->srvstate]);
2816 return;
2817 }
2818
2819 set_service_state(service, VCHIQ_SRVSTATE_FREE);
2820
2821 complete(&service->remove_event);
2822
2823 /* Release the initial lock */
2824 vchiq_service_put(service);
2825 }
2826
2827 enum vchiq_status
2828 vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2829 {
2830 struct vchiq_service *service;
2831 int i;
2832
2833 /* Find all services registered to this client and enable them. */
2834 i = 0;
2835 while ((service = next_service_by_instance(state, instance, &i)) != NULL) {
2836 if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
2837 set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
2838 vchiq_service_put(service);
2839 }
2840
2841 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
2842 if (queue_message(state, NULL, MAKE_CONNECT, NULL, NULL, 0,
2843 QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)
2844 return VCHIQ_RETRY;
2845
2846 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
2847 }
2848
2849 if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
2850 if (wait_for_completion_interruptible(&state->connect))
2851 return VCHIQ_RETRY;
2852
2853 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
2854 complete(&state->connect);
2855 }
2856
2857 return VCHIQ_SUCCESS;
2858 }
2859
2860 void
2861 vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2862 {
2863 struct vchiq_service *service;
2864 int i;
2865
2866 /* Find all services registered to this client and remove them. */
2867 i = 0;
2868 while ((service = next_service_by_instance(state, instance, &i)) != NULL) {
2869 (void)vchiq_remove_service(service->handle);
2870 vchiq_service_put(service);
2871 }
2872 }
2873
2874 enum vchiq_status
2875 vchiq_close_service(unsigned int handle)
2876 {
2877 /* Unregister the service */
2878 struct vchiq_service *service = find_service_by_handle(handle);
2879 enum vchiq_status status = VCHIQ_SUCCESS;
2880
2881 if (!service)
2882 return VCHIQ_ERROR;
2883
2884 vchiq_log_info(vchiq_core_log_level, "%d: close_service:%d",
2885 service->state->id, service->localport);
2886
2887 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2888 (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2889 (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
2890 vchiq_service_put(service);
2891 return VCHIQ_ERROR;
2892 }
2893
2894 mark_service_closing(service);
2895
2896 if (current == service->state->slot_handler_thread) {
2897 status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
2898 WARN_ON(status == VCHIQ_RETRY);
2899 } else {
2900 /* Mark the service for termination by the slot handler */
2901 request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
2902 }
2903
2904 while (1) {
2905 if (wait_for_completion_interruptible(&service->remove_event)) {
2906 status = VCHIQ_RETRY;
2907 break;
2908 }
2909
2910 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2911 (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2912 (service->srvstate == VCHIQ_SRVSTATE_OPEN))
2913 break;
2914
2915 vchiq_log_warning(vchiq_core_log_level,
2916 "%d: close_service:%d - waiting in state %s",
2917 service->state->id, service->localport,
2918 srvstate_names[service->srvstate]);
2919 }
2920
2921 if ((status == VCHIQ_SUCCESS) &&
2922 (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
2923 (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
2924 status = VCHIQ_ERROR;
2925
2926 vchiq_service_put(service);
2927
2928 return status;
2929 }
2930 EXPORT_SYMBOL(vchiq_close_service);
2931
2932 enum vchiq_status
2933 vchiq_remove_service(unsigned int handle)
2934 {
2935 /* Unregister the service */
2936 struct vchiq_service *service = find_service_by_handle(handle);
2937 enum vchiq_status status = VCHIQ_SUCCESS;
2938
2939 if (!service)
2940 return VCHIQ_ERROR;
2941
2942 vchiq_log_info(vchiq_core_log_level, "%d: remove_service:%d",
2943 service->state->id, service->localport);
2944
2945 if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
2946 vchiq_service_put(service);
2947 return VCHIQ_ERROR;
2948 }
2949
2950 mark_service_closing(service);
2951
2952 if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
2953 (current == service->state->slot_handler_thread)) {
2954 /*
2955 * Make it look like a client, because it must be removed and
2956 * not left in the LISTENING state.
2957 */
2958 service->public_fourcc = VCHIQ_FOURCC_INVALID;
2959
2960 status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
2961 WARN_ON(status == VCHIQ_RETRY);
2962 } else {
2963 /* Mark the service for removal by the slot handler */
2964 request_poll(service->state, service, VCHIQ_POLL_REMOVE);
2965 }
2966 while (1) {
2967 if (wait_for_completion_interruptible(&service->remove_event)) {
2968 status = VCHIQ_RETRY;
2969 break;
2970 }
2971
2972 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2973 (service->srvstate == VCHIQ_SRVSTATE_OPEN))
2974 break;
2975
2976 vchiq_log_warning(vchiq_core_log_level,
2977 "%d: remove_service:%d - waiting in state %s",
2978 service->state->id, service->localport,
2979 srvstate_names[service->srvstate]);
2980 }
2981
2982 if ((status == VCHIQ_SUCCESS) &&
2983 (service->srvstate != VCHIQ_SRVSTATE_FREE))
2984 status = VCHIQ_ERROR;
2985
2986 vchiq_service_put(service);
2987
2988 return status;
2989 }
2990
2991 /*
2992 * This function may be called by kernel threads or user threads.
2993 * User threads may receive VCHIQ_RETRY to indicate that a signal has been
2994 * received and the call should be retried after being returned to user
2995 * context.
2996 * When called in blocking mode, the userdata field points to a bulk_waiter
2997 * structure.
2998 */
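/*
 * A blocking transmit from kernel context might look roughly like this
 * (illustrative sketch only; error handling trimmed, and 'handle', 'buf'
 * and 'len' are assumed to come from the caller):
 *
 *	struct bulk_waiter waiter;
 *	enum vchiq_status status;
 *
 *	status = vchiq_bulk_transfer(handle, buf, NULL, len, &waiter,
 *				     VCHIQ_BULK_MODE_BLOCKING,
 *				     VCHIQ_BULK_TRANSMIT);
 *
 * In blocking mode this function initialises the waiter's completion itself
 * and waits on it until the transfer completes or is aborted.
 */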
2999 enum vchiq_status vchiq_bulk_transfer(unsigned int handle, void *offset, void __user *uoffset,
3000 int size, void *userdata, enum vchiq_bulk_mode mode,
3001 enum vchiq_bulk_dir dir)
3002 {
3003 struct vchiq_service *service = find_service_by_handle(handle);
3004 struct vchiq_bulk_queue *queue;
3005 struct vchiq_bulk *bulk;
3006 struct vchiq_state *state;
3007 struct bulk_waiter *bulk_waiter = NULL;
3008 const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
3009 const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
3010 VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
3011 enum vchiq_status status = VCHIQ_ERROR;
3012 int payload[2];
3013
3014 if (!service)
3015 goto error_exit;
3016
3017 if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3018 goto error_exit;
3019
3020 if (!offset && !uoffset)
3021 goto error_exit;
3022
3023 if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3024 goto error_exit;
3025
3026 switch (mode) {
3027 case VCHIQ_BULK_MODE_NOCALLBACK:
3028 case VCHIQ_BULK_MODE_CALLBACK:
3029 break;
3030 case VCHIQ_BULK_MODE_BLOCKING:
3031 bulk_waiter = userdata;
3032 init_completion(&bulk_waiter->event);
3033 bulk_waiter->actual = 0;
3034 bulk_waiter->bulk = NULL;
3035 break;
3036 case VCHIQ_BULK_MODE_WAITING:
3037 bulk_waiter = userdata;
3038 bulk = bulk_waiter->bulk;
3039 goto waiting;
3040 default:
3041 goto error_exit;
3042 }
3043
3044 state = service->state;
3045
3046 queue = (dir == VCHIQ_BULK_TRANSMIT) ?
3047 &service->bulk_tx : &service->bulk_rx;
3048
3049 if (mutex_lock_killable(&service->bulk_mutex)) {
3050 status = VCHIQ_RETRY;
3051 goto error_exit;
3052 }
3053
3054 if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
3055 VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
3056 do {
3057 mutex_unlock(&service->bulk_mutex);
3058 if (wait_for_completion_interruptible(&service->bulk_remove_event)) {
3059 status = VCHIQ_RETRY;
3060 goto error_exit;
3061 }
3062 if (mutex_lock_killable(&service->bulk_mutex)) {
3063 status = VCHIQ_RETRY;
3064 goto error_exit;
3065 }
3066 } while (queue->local_insert == queue->remove +
3067 VCHIQ_NUM_SERVICE_BULKS);
3068 }
3069
3070 bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
3071
3072 bulk->mode = mode;
3073 bulk->dir = dir;
3074 bulk->userdata = userdata;
3075 bulk->size = size;
3076 bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
3077
3078 if (vchiq_prepare_bulk_data(bulk, offset, uoffset, size, dir))
3079 goto unlock_error_exit;
3080
3081 wmb();
3082
3083 vchiq_log_info(vchiq_core_log_level, "%d: bt (%d->%d) %cx %x@%pad %pK",
3084 state->id, service->localport, service->remoteport,
3085 dir_char, size, &bulk->data, userdata);
3086
3087 /*
3088 * The slot mutex must be held when the service is being closed, so
3089 * claim it here to ensure that isn't happening
3090 */
3091 if (mutex_lock_killable(&state->slot_mutex)) {
3092 status = VCHIQ_RETRY;
3093 goto cancel_bulk_error_exit;
3094 }
3095
3096 if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3097 goto unlock_both_error_exit;
3098
3099 payload[0] = lower_32_bits(bulk->data);
3100 payload[1] = bulk->size;
3101 status = queue_message(state,
3102 NULL,
3103 VCHIQ_MAKE_MSG(dir_msgtype,
3104 service->localport,
3105 service->remoteport),
3106 memcpy_copy_callback,
3107 &payload,
3108 sizeof(payload),
3109 QMFLAGS_IS_BLOCKING |
3110 QMFLAGS_NO_MUTEX_LOCK |
3111 QMFLAGS_NO_MUTEX_UNLOCK);
3112 if (status != VCHIQ_SUCCESS)
3113 goto unlock_both_error_exit;
3114
3115 queue->local_insert++;
3116
3117 mutex_unlock(&state->slot_mutex);
3118 mutex_unlock(&service->bulk_mutex);
3119
3120 vchiq_log_trace(vchiq_core_log_level, "%d: bt:%d %cx li=%x ri=%x p=%x",
3121 state->id, service->localport, dir_char, queue->local_insert,
3122 queue->remote_insert, queue->process);
3123
3124 waiting:
3125 vchiq_service_put(service);
3126
3127 status = VCHIQ_SUCCESS;
3128
3129 if (bulk_waiter) {
3130 bulk_waiter->bulk = bulk;
3131 if (wait_for_completion_interruptible(&bulk_waiter->event))
3132 status = VCHIQ_RETRY;
3133 else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
3134 status = VCHIQ_ERROR;
3135 }
3136
3137 return status;
3138
3139 unlock_both_error_exit:
3140 mutex_unlock(&state->slot_mutex);
3141 cancel_bulk_error_exit:
3142 vchiq_complete_bulk(bulk);
3143 unlock_error_exit:
3144 mutex_unlock(&service->bulk_mutex);
3145
3146 error_exit:
3147 if (service)
3148 vchiq_service_put(service);
3149 return status;
3150 }
3151
3152 enum vchiq_status
3153 vchiq_queue_message(unsigned int handle,
3154 ssize_t (*copy_callback)(void *context, void *dest,
3155 size_t offset, size_t maxsize),
3156 void *context,
3157 size_t size)
3158 {
3159 struct vchiq_service *service = find_service_by_handle(handle);
3160 enum vchiq_status status = VCHIQ_ERROR;
3161 int data_id;
3162
3163 if (!service)
3164 goto error_exit;
3165
3166 if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3167 goto error_exit;
3168
3169 if (!size) {
3170 VCHIQ_SERVICE_STATS_INC(service, error_count);
3171 goto error_exit;
3172 }
3173
3174 if (size > VCHIQ_MAX_MSG_SIZE) {
3175 VCHIQ_SERVICE_STATS_INC(service, error_count);
3176 goto error_exit;
3177 }
3178
3179 data_id = MAKE_DATA(service->localport, service->remoteport);
3180
3181 switch (service->srvstate) {
3182 case VCHIQ_SRVSTATE_OPEN:
3183 status = queue_message(service->state, service, data_id,
3184 copy_callback, context, size, 1);
3185 break;
3186 case VCHIQ_SRVSTATE_OPENSYNC:
3187 status = queue_message_sync(service->state, service, data_id,
3188 copy_callback, context, size, 1);
3189 break;
3190 default:
3191 status = VCHIQ_ERROR;
3192 break;
3193 }
3194
3195 error_exit:
3196 if (service)
3197 vchiq_service_put(service);
3198
3199 return status;
3200 }
3201
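/*
 * Blocking wrapper around vchiq_queue_message() for kernel callers: retries
 * with a 1 ms sleep while the core reports VCHIQ_RETRY, so it must not be
 * used from atomic context. A minimal call (illustrative only; 'msg' is a
 * caller-defined payload) would be:
 *
 *	int err = vchiq_queue_kernel_message(handle, &msg, sizeof(msg));
 */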
3202 int vchiq_queue_kernel_message(unsigned int handle, void *data, unsigned int size)
3203 {
3204 enum vchiq_status status;
3205
3206 while (1) {
3207 status = vchiq_queue_message(handle, memcpy_copy_callback,
3208 data, size);
3209
3210 /*
3211 * vchiq_queue_message() may return VCHIQ_RETRY, so we need to
3212 * implement a retry mechanism since this function is supposed
3213 * to block until queued
3214 */
3215 if (status != VCHIQ_RETRY)
3216 break;
3217
3218 msleep(1);
3219 }
3220
3221 return status;
3222 }
3223 EXPORT_SYMBOL(vchiq_queue_kernel_message);
3224
3225 void
3226 vchiq_release_message(unsigned int handle,
3227 struct vchiq_header *header)
3228 {
3229 struct vchiq_service *service = find_service_by_handle(handle);
3230 struct vchiq_shared_state *remote;
3231 struct vchiq_state *state;
3232 int slot_index;
3233
3234 if (!service)
3235 return;
3236
3237 state = service->state;
3238 remote = state->remote;
3239
3240 slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
3241
3242 if ((slot_index >= remote->slot_first) &&
3243 (slot_index <= remote->slot_last)) {
3244 int msgid = header->msgid;
3245
3246 if (msgid & VCHIQ_MSGID_CLAIMED) {
3247 struct vchiq_slot_info *slot_info =
3248 SLOT_INFO_FROM_INDEX(state, slot_index);
3249
3250 release_slot(state, slot_info, header, service);
3251 }
3252 } else if (slot_index == remote->slot_sync) {
3253 release_message_sync(state, header);
3254 }
3255
3256 vchiq_service_put(service);
3257 }
3258 EXPORT_SYMBOL(vchiq_release_message);
3259
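/*
 * Hand the single synchronous slot back to the remote: mark its header as
 * padding and signal the peer's sync_release event.
 */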
3260 static void
3261 release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
3262 {
3263 header->msgid = VCHIQ_MSGID_PADDING;
3264 remote_event_signal(&state->remote->sync_release);
3265 }
3266
3267 enum vchiq_status
3268 vchiq_get_peer_version(unsigned int handle, short *peer_version)
3269 {
3270 enum vchiq_status status = VCHIQ_ERROR;
3271 struct vchiq_service *service = find_service_by_handle(handle);
3272
3273 if (!service)
3274 goto exit;
3275
3276 if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3277 goto exit;
3278
3279 if (!peer_version)
3280 goto exit;
3281
3282 *peer_version = service->peer_version;
3283 status = VCHIQ_SUCCESS;
3284
3285 exit:
3286 if (service)
3287 vchiq_service_put(service);
3288 return status;
3289 }
3290 EXPORT_SYMBOL(vchiq_get_peer_version);
3291
3292 void vchiq_get_config(struct vchiq_config *config)
3293 {
3294 config->max_msg_size = VCHIQ_MAX_MSG_SIZE;
3295 config->bulk_threshold = VCHIQ_MAX_MSG_SIZE;
3296 config->max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
3297 config->max_services = VCHIQ_MAX_SERVICES;
3298 config->version = VCHIQ_VERSION;
3299 config->version_min = VCHIQ_VERSION_MIN;
3300 }
3301
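/*
 * Per-service tunables: AUTOCLOSE toggles automatic return to LISTENING on
 * close, SLOT_QUOTA and MESSAGE_QUOTA resize the transmit quotas (a value
 * of 0 restores the state defaults, and raising a quota above the current
 * use count wakes any waiter on quota_event), SYNCHRONOUS may only be
 * changed while the service is HIDDEN or LISTENING, and TRACE enables
 * per-service message tracing.
 */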
3302 int
3303 vchiq_set_service_option(unsigned int handle, enum vchiq_service_option option, int value)
3304 {
3305 struct vchiq_service *service = find_service_by_handle(handle);
3306 struct vchiq_service_quota *quota;
3307 int ret = -EINVAL;
3308
3309 if (!service)
3310 return -EINVAL;
3311
3312 switch (option) {
3313 case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
3314 service->auto_close = value;
3315 ret = 0;
3316 break;
3317
3318 case VCHIQ_SERVICE_OPTION_SLOT_QUOTA:
3319 quota = &service->state->service_quotas[service->localport];
3320 if (value == 0)
3321 value = service->state->default_slot_quota;
3322 if ((value >= quota->slot_use_count) &&
3323 (value < (unsigned short)~0)) {
3324 quota->slot_quota = value;
3325 if ((value >= quota->slot_use_count) &&
3326 (quota->message_quota >= quota->message_use_count))
3327 /*
3328 * Signal the service that it may have
3329 * dropped below its quota
3330 */
3331 complete(&quota->quota_event);
3332 ret = 0;
3333 }
3334 break;
3335
3336 case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA:
3337 quota = &service->state->service_quotas[service->localport];
3338 if (value == 0)
3339 value = service->state->default_message_quota;
3340 if ((value >= quota->message_use_count) &&
3341 (value < (unsigned short)~0)) {
3342 quota->message_quota = value;
3343 if ((value >= quota->message_use_count) &&
3344 (quota->slot_quota >= quota->slot_use_count))
3345 /*
3346 * Signal the service that it may have
3347 * dropped below its quota
3348 */
3349 complete(&quota->quota_event);
3350 ret = 0;
3351 }
3352 break;
3353
3354 case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
3355 if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3356 (service->srvstate == VCHIQ_SRVSTATE_LISTENING)) {
3357 service->sync = value;
3358 ret = 0;
3359 }
3360 break;
3361
3362 case VCHIQ_SERVICE_OPTION_TRACE:
3363 service->trace = value;
3364 ret = 0;
3365 break;
3366
3367 default:
3368 break;
3369 }
3370 vchiq_service_put(service);
3371
3372 return ret;
3373 }
3374
3375 static int
3376 vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
3377 struct vchiq_shared_state *shared, const char *label)
3378 {
3379 static const char *const debug_names[] = {
3380 "<entries>",
3381 "SLOT_HANDLER_COUNT",
3382 "SLOT_HANDLER_LINE",
3383 "PARSE_LINE",
3384 "PARSE_HEADER",
3385 "PARSE_MSGID",
3386 "AWAIT_COMPLETION_LINE",
3387 "DEQUEUE_MESSAGE_LINE",
3388 "SERVICE_CALLBACK_LINE",
3389 "MSG_QUEUE_FULL_COUNT",
3390 "COMPLETION_QUEUE_FULL_COUNT"
3391 };
3392 int i;
3393 char buf[80];
3394 int len;
3395 int err;
3396
3397 len = scnprintf(buf, sizeof(buf), " %s: slots %d-%d tx_pos=%x recycle=%x",
3398 label, shared->slot_first, shared->slot_last,
3399 shared->tx_pos, shared->slot_queue_recycle);
3400 err = vchiq_dump(dump_context, buf, len + 1);
3401 if (err)
3402 return err;
3403
3404 len = scnprintf(buf, sizeof(buf), " Slots claimed:");
3405 err = vchiq_dump(dump_context, buf, len + 1);
3406 if (err)
3407 return err;
3408
3409 for (i = shared->slot_first; i <= shared->slot_last; i++) {
3410 struct vchiq_slot_info slot_info =
3411 *SLOT_INFO_FROM_INDEX(state, i);
3412 if (slot_info.use_count != slot_info.release_count) {
3413 len = scnprintf(buf, sizeof(buf), " %d: %d/%d", i, slot_info.use_count,
3414 slot_info.release_count);
3415 err = vchiq_dump(dump_context, buf, len + 1);
3416 if (err)
3417 return err;
3418 }
3419 }
3420
3421 for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
3422 len = scnprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
3423 debug_names[i], shared->debug[i], shared->debug[i]);
3424 err = vchiq_dump(dump_context, buf, len + 1);
3425 if (err)
3426 return err;
3427 }
3428 return 0;
3429 }
3430
3431 int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
3432 {
3433 char buf[80];
3434 int len;
3435 int i;
3436 int err;
3437
3438 len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
3439 conn_state_names[state->conn_state]);
3440 err = vchiq_dump(dump_context, buf, len + 1);
3441 if (err)
3442 return err;
3443
3444 len = scnprintf(buf, sizeof(buf), " tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
3445 state->local->tx_pos,
3446 state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
3447 state->rx_pos,
3448 state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
3449 err = vchiq_dump(dump_context, buf, len + 1);
3450 if (err)
3451 return err;
3452
3453 len = scnprintf(buf, sizeof(buf), " Version: %d (min %d)",
3454 VCHIQ_VERSION, VCHIQ_VERSION_MIN);
3455 err = vchiq_dump(dump_context, buf, len + 1);
3456 if (err)
3457 return err;
3458
3459 if (VCHIQ_ENABLE_STATS) {
3460 len = scnprintf(buf, sizeof(buf),
3461 " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d",
3462 state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
3463 state->stats.error_count);
3464 err = vchiq_dump(dump_context, buf, len + 1);
3465 if (err)
3466 return err;
3467 }
3468
3469 len = scnprintf(buf, sizeof(buf),
3470 " Slots: %d available (%d data), %d recyclable, %d stalls (%d data)",
3471 ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
3472 state->local_tx_pos) / VCHIQ_SLOT_SIZE,
3473 state->data_quota - state->data_use_count,
3474 state->local->slot_queue_recycle - state->slot_queue_available,
3475 state->stats.slot_stalls, state->stats.data_stalls);
3476 err = vchiq_dump(dump_context, buf, len + 1);
3477 if (err)
3478 return err;
3479
3480 err = vchiq_dump_platform_state(dump_context);
3481 if (err)
3482 return err;
3483
3484 err = vchiq_dump_shared_state(dump_context,
3485 state,
3486 state->local,
3487 "Local");
3488 if (err)
3489 return err;
3490 err = vchiq_dump_shared_state(dump_context,
3491 state,
3492 state->remote,
3493 "Remote");
3494 if (err)
3495 return err;
3496
3497 err = vchiq_dump_platform_instances(dump_context);
3498 if (err)
3499 return err;
3500
3501 for (i = 0; i < state->unused_service; i++) {
3502 struct vchiq_service *service = find_service_by_port(state, i);
3503
3504 if (service) {
3505 err = vchiq_dump_service_state(dump_context, service);
3506 vchiq_service_put(service);
3507 if (err)
3508 return err;
3509 }
3510 }
3511 return 0;
3512 }
3513
3514 int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
3515 {
3516 char buf[80];
3517 int len;
3518 int err;
3519 unsigned int ref_count;
3520
3521 /* Don't include the lock just taken */
3522 ref_count = kref_read(&service->ref_count) - 1;
3523 len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
3524 service->localport, srvstate_names[service->srvstate],
3525 ref_count);
3526
3527 if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
3528 char remoteport[30];
3529 struct vchiq_service_quota *quota =
3530 &service->state->service_quotas[service->localport];
3531 int fourcc = service->base.fourcc;
3532 int tx_pending, rx_pending;
3533
3534 if (service->remoteport != VCHIQ_PORT_FREE) {
3535 int len2 = scnprintf(remoteport, sizeof(remoteport),
3536 "%u", service->remoteport);
3537
3538 if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
3539 scnprintf(remoteport + len2, sizeof(remoteport) - len2,
3540 " (client %x)", service->client_id);
3541 } else {
3542 strscpy(remoteport, "n/a", sizeof(remoteport));
3543 }
3544
3545 len += scnprintf(buf + len, sizeof(buf) - len,
3546 " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
3547 VCHIQ_FOURCC_AS_4CHARS(fourcc), remoteport,
3548 quota->message_use_count, quota->message_quota,
3549 quota->slot_use_count, quota->slot_quota);
3550
3551 err = vchiq_dump(dump_context, buf, len + 1);
3552 if (err)
3553 return err;
3554
		tx_pending = service->bulk_tx.local_insert -
			     service->bulk_tx.remote_insert;

		rx_pending = service->bulk_rx.local_insert -
			     service->bulk_rx.remote_insert;

		len = scnprintf(buf, sizeof(buf),
				"  Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)",
				tx_pending,
				tx_pending ?
				service->bulk_tx.bulks[BULK_INDEX(service->bulk_tx.remove)].size :
				0, rx_pending, rx_pending ?
				service->bulk_rx.bulks[BULK_INDEX(service->bulk_rx.remove)].size :
				0);

		if (VCHIQ_ENABLE_STATS) {
			err = vchiq_dump(dump_context, buf, len + 1);
			if (err)
				return err;

			len = scnprintf(buf, sizeof(buf),
					"  Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
					service->stats.ctrl_tx_count, service->stats.ctrl_tx_bytes,
					service->stats.ctrl_rx_count, service->stats.ctrl_rx_bytes);
			err = vchiq_dump(dump_context, buf, len + 1);
			if (err)
				return err;

			len = scnprintf(buf, sizeof(buf),
					"  Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
					service->stats.bulk_tx_count, service->stats.bulk_tx_bytes,
					service->stats.bulk_rx_count, service->stats.bulk_rx_bytes);
			err = vchiq_dump(dump_context, buf, len + 1);
			if (err)
				return err;

			len = scnprintf(buf, sizeof(buf),
					"  %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors",
					service->stats.quota_stalls, service->stats.slot_stalls,
					service->stats.bulk_stalls,
					service->stats.bulk_aborted_count,
					service->stats.error_count);
		}
	}

	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	if (service->srvstate != VCHIQ_SRVSTATE_FREE)
		err = vchiq_dump_platform_service_state(dump_context, service);
	return err;
}

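/*
 * vchiq_loud_error_header/footer bracket a serious error report with a
 * banner of '=' lines so that it stands out in the kernel log.
 */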
void
vchiq_loud_error_header(void)
{
	vchiq_log_error(vchiq_core_log_level,
			"============================================================================");
	vchiq_log_error(vchiq_core_log_level,
			"============================================================================");
	vchiq_log_error(vchiq_core_log_level, "=====");
}

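/* Closing banner; pairs with vchiq_loud_error_header(). */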
void
vchiq_loud_error_footer(void)
{
	vchiq_log_error(vchiq_core_log_level, "=====");
	vchiq_log_error(vchiq_core_log_level,
			"============================================================================");
	vchiq_log_error(vchiq_core_log_level,
			"============================================================================");
}

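/*
 * Queue a REMOTE_USE message for the peer. Only valid once a connection
 * exists; while disconnected the caller gets VCHIQ_RETRY.
 */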
enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state)
{
	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
		return VCHIQ_RETRY;

	return queue_message(state, NULL, MAKE_REMOTE_USE, NULL, NULL, 0, 0);
}

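/* As above, but for the REMOTE_USE_ACTIVE message. */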
enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state)
{
	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
		return VCHIQ_RETRY;

	return queue_message(state, NULL, MAKE_REMOTE_USE_ACTIVE,
			     NULL, NULL, 0, 0);
}

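/*
 * Log a hex + ASCII dump of a memory region at trace level, 16 bytes per
 * line; 'addr' is only used to label each output line. A purely
 * illustrative call (the buffer and length here are hypothetical):
 *
 *	vchiq_log_dump_mem("rx", 0, header->data, header->size);
 */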
void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem, size_t num_bytes)
{
	const u8 *mem = void_mem;
	size_t offset;
	char line_buf[100];
	char *s;

	while (num_bytes > 0) {
		s = line_buf;

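		/* First pass: hex representation, padded past the end of the data. */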
		for (offset = 0; offset < 16; offset++) {
			if (offset < num_bytes)
				s += scnprintf(s, 4, "%02x ", mem[offset]);
			else
				s += scnprintf(s, 4, "   ");
		}

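		/* Second pass: printable ASCII, with '.' for non-printable bytes. */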
		for (offset = 0; offset < 16; offset++) {
			if (offset < num_bytes) {
				u8 ch = mem[offset];

				if ((ch < ' ') || (ch > '~'))
					ch = '.';
				*s++ = (char)ch;
			}
		}
		*s++ = '\0';

		if (label && (*label != '\0'))
			vchiq_log_trace(VCHIQ_LOG_TRACE, "%s: %08x: %s", label, addr, line_buf);
		else
			vchiq_log_trace(VCHIQ_LOG_TRACE, "%08x: %s", addr, line_buf);

		addr += 16;
		mem += 16;
		if (num_bytes > 16)
			num_bytes -= 16;
		else
			num_bytes = 0;
	}
}
