/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */

#ifndef VCHIQ_CORE_H
#define VCHIQ_CORE_H

#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/raspberrypi/vchiq.h>

#include "vchiq_cfg.h"

/* Do this so that we can test-build the code on non-rpi systems */
#if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE)

#else

#ifndef dsb
#define dsb(a)
#endif

#endif	/* IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE) */

#define VCHIQ_SERVICE_HANDLE_INVALID 0

#define VCHIQ_SLOT_SIZE     4096
#define VCHIQ_MAX_MSG_SIZE  (VCHIQ_SLOT_SIZE - sizeof(struct vchiq_header))

/* Run time control of log level, based on KERN_XXX level. */
#define VCHIQ_LOG_DEFAULT  4
#define VCHIQ_LOG_ERROR    3
#define VCHIQ_LOG_WARNING  4
#define VCHIQ_LOG_INFO     6
#define VCHIQ_LOG_TRACE    7

#define VCHIQ_LOG_PREFIX   KERN_INFO "vchiq: "

#ifndef vchiq_log_error
#define vchiq_log_error(cat, fmt, ...) \
	do { if (cat >= VCHIQ_LOG_ERROR) \
		printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
#endif
#ifndef vchiq_log_warning
#define vchiq_log_warning(cat, fmt, ...) \
	do { if (cat >= VCHIQ_LOG_WARNING) \
		printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
#endif
#ifndef vchiq_log_info
#define vchiq_log_info(cat, fmt, ...) \
	do { if (cat >= VCHIQ_LOG_INFO) \
		printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
#endif
#ifndef vchiq_log_trace
#define vchiq_log_trace(cat, fmt, ...) \
	do { if (cat >= VCHIQ_LOG_TRACE) \
		printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
#endif

#define vchiq_loud_error(...) \
	vchiq_log_error(vchiq_core_log_level, "===== " __VA_ARGS__)
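
/*
 * The log macros above take a per-category level variable (such as
 * vchiq_core_log_level) and only emit the message when that level is at
 * or above the macro's threshold. Illustrative sketch (not part of this
 * header) of how a caller might use them:
 *
 *	vchiq_log_info(vchiq_core_log_level, "%d: connected", state->id);
 *	vchiq_log_error(vchiq_core_log_level, "invalid slot %d", idx);
 */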

#define VCHIQ_SLOT_MASK        (VCHIQ_SLOT_SIZE - 1)
#define VCHIQ_SLOT_QUEUE_MASK  (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
#define VCHIQ_SLOT_ZERO_SLOTS  DIV_ROUND_UP(sizeof(struct vchiq_slot_zero), \
					    VCHIQ_SLOT_SIZE)

#define VCHIQ_FOURCC_AS_4CHARS(fourcc)	\
	((fourcc) >> 24) & 0xff, \
	((fourcc) >> 16) & 0xff, \
	((fourcc) >>  8) & 0xff, \
	(fourcc) & 0xff
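
/*
 * VCHIQ_FOURCC_AS_4CHARS() expands to four comma-separated character
 * values, so it pairs with a "%c%c%c%c" format string. Illustrative
 * sketch (not part of this header), assuming a fourcc variable:
 *
 *	vchiq_log_info(vchiq_core_log_level, "service '%c%c%c%c' opened",
 *		       VCHIQ_FOURCC_AS_4CHARS(fourcc));
 */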

#define BITSET_SIZE(b)        (((b) + 31) >> 5)
#define BITSET_WORD(b)        ((b) >> 5)
#define BITSET_BIT(b)         (1 << ((b) & 31))
#define BITSET_IS_SET(bs, b)  ((bs)[BITSET_WORD(b)] & BITSET_BIT(b))
#define BITSET_SET(bs, b)     ((bs)[BITSET_WORD(b)] |= BITSET_BIT(b))
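
/*
 * The BITSET_* macros implement a simple bitset over an array of 32-bit
 * words; BITSET_SIZE() gives the number of words needed for b bits.
 * Minimal sketch (not part of this header):
 *
 *	u32 flags[BITSET_SIZE(VCHIQ_MAX_SERVICES)] = { 0 };
 *
 *	BITSET_SET(flags, localport);
 *	if (BITSET_IS_SET(flags, localport))
 *		...
 */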

enum {
	DEBUG_ENTRIES,
#if VCHIQ_ENABLE_DEBUG
	DEBUG_SLOT_HANDLER_COUNT,
	DEBUG_SLOT_HANDLER_LINE,
	DEBUG_PARSE_LINE,
	DEBUG_PARSE_HEADER,
	DEBUG_PARSE_MSGID,
	DEBUG_AWAIT_COMPLETION_LINE,
	DEBUG_DEQUEUE_MESSAGE_LINE,
	DEBUG_SERVICE_CALLBACK_LINE,
	DEBUG_MSG_QUEUE_FULL_COUNT,
	DEBUG_COMPLETION_QUEUE_FULL_COUNT,
#endif
	DEBUG_MAX
};

#if VCHIQ_ENABLE_DEBUG

#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug
#define DEBUG_TRACE(d) \
	do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(sy); } while (0)
#define DEBUG_VALUE(d, v) \
	do { debug_ptr[DEBUG_ ## d] = (v); dsb(sy); } while (0)
#define DEBUG_COUNT(d) \
	do { debug_ptr[DEBUG_ ## d]++; dsb(sy); } while (0)

#else /* VCHIQ_ENABLE_DEBUG */

#define DEBUG_INITIALISE(local)
#define DEBUG_TRACE(d)
#define DEBUG_VALUE(d, v)
#define DEBUG_COUNT(d)

#endif /* VCHIQ_ENABLE_DEBUG */
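
/*
 * Illustrative sketch (not part of this header) of how the DEBUG_*
 * helpers are used, assuming a struct vchiq_shared_state pointer named
 * "local": DEBUG_INITIALISE() declares the debug_ptr the other macros
 * rely on, DEBUG_TRACE() records the current __LINE__, DEBUG_VALUE()
 * stores an arbitrary value and DEBUG_COUNT() bumps a counter.
 *
 *	DEBUG_INITIALISE(local);
 *	DEBUG_TRACE(PARSE_LINE);
 *	DEBUG_VALUE(PARSE_MSGID, msgid);
 *	DEBUG_COUNT(SLOT_HANDLER_COUNT);
 *
 * Each store is followed by a dsb() so the value lands promptly in the
 * shared debug[] array, where it can be inspected if the code hangs.
 */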

enum vchiq_connstate {
	VCHIQ_CONNSTATE_DISCONNECTED,
	VCHIQ_CONNSTATE_CONNECTING,
	VCHIQ_CONNSTATE_CONNECTED,
	VCHIQ_CONNSTATE_PAUSING,
	VCHIQ_CONNSTATE_PAUSE_SENT,
	VCHIQ_CONNSTATE_PAUSED,
	VCHIQ_CONNSTATE_RESUMING,
	VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
	VCHIQ_CONNSTATE_RESUME_TIMEOUT
};

enum {
	VCHIQ_SRVSTATE_FREE,
	VCHIQ_SRVSTATE_HIDDEN,
	VCHIQ_SRVSTATE_LISTENING,
	VCHIQ_SRVSTATE_OPENING,
	VCHIQ_SRVSTATE_OPEN,
	VCHIQ_SRVSTATE_OPENSYNC,
	VCHIQ_SRVSTATE_CLOSESENT,
	VCHIQ_SRVSTATE_CLOSERECVD,
	VCHIQ_SRVSTATE_CLOSEWAIT,
	VCHIQ_SRVSTATE_CLOSED
};

enum vchiq_bulk_dir {
	VCHIQ_BULK_TRANSMIT,
	VCHIQ_BULK_RECEIVE
};

struct vchiq_bulk {
	short mode;
	short dir;
	void *userdata;
	dma_addr_t data;
	int size;
	void *remote_data;
	int remote_size;
	int actual;
};

struct vchiq_bulk_queue {
	int local_insert;  /* Where to insert the next local bulk */
	int remote_insert; /* Where to insert the next remote bulk (master) */
	int process;       /* Bulk to transfer next */
	int remote_notify; /* Bulk to notify the remote client of next (mstr) */
	int remove;        /* Bulk to notify the local client of, and remove, next */
	struct vchiq_bulk bulks[VCHIQ_NUM_SERVICE_BULKS];
};
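
/*
 * The indices above appear to be free-running counters that are reduced
 * modulo VCHIQ_NUM_SERVICE_BULKS when indexing bulks[]. Minimal sketch
 * (an assumption, not part of this header):
 *
 *	struct vchiq_bulk *bulk =
 *		&queue->bulks[queue->local_insert % VCHIQ_NUM_SERVICE_BULKS];
 *	queue->local_insert++;
 */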

struct remote_event {
	int armed;
	int fired;
	u32 __unused;
};

struct opaque_platform_state;

struct vchiq_slot {
	char data[VCHIQ_SLOT_SIZE];
};

struct vchiq_slot_info {
	/* Use two counters rather than one to avoid the need for a mutex. */
	short use_count;
	short release_count;
};
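
/*
 * With the two-counter scheme above, a slot is considered fully
 * processed once release_count has caught up with use_count, at which
 * point it can be recycled.
 */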

struct vchiq_service {
	struct vchiq_service_base base;
	unsigned int handle;
	struct kref ref_count;
	struct rcu_head rcu;
	int srvstate;
	void (*userdata_term)(void *userdata);
	unsigned int localport;
	unsigned int remoteport;
	int public_fourcc;
	int client_id;
	char auto_close;
	char sync;
	char closing;
	char trace;
	atomic_t poll_flags;
	short version;
	short version_min;
	short peer_version;

	struct vchiq_state *state;
	struct vchiq_instance *instance;

	int service_use_count;

	struct vchiq_bulk_queue bulk_tx;
	struct vchiq_bulk_queue bulk_rx;

	struct completion remove_event;
	struct completion bulk_remove_event;
	struct mutex bulk_mutex;

	struct service_stats_struct {
		int quota_stalls;
		int slot_stalls;
		int bulk_stalls;
		int error_count;
		int ctrl_tx_count;
		int ctrl_rx_count;
		int bulk_tx_count;
		int bulk_rx_count;
		int bulk_aborted_count;
		u64 ctrl_tx_bytes;
		u64 ctrl_rx_bytes;
		u64 bulk_tx_bytes;
		u64 bulk_rx_bytes;
	} stats;

	int msg_queue_read;
	int msg_queue_write;
	struct completion msg_queue_pop;
	struct completion msg_queue_push;
	struct vchiq_header *msg_queue[VCHIQ_MAX_SLOTS];
};

/*
 * The quota information is outside struct vchiq_service so that it can
 * be statically allocated, since for accounting reasons a service's slot
 * usage is carried over between users of the same port number.
 */
struct vchiq_service_quota {
	unsigned short slot_quota;
	unsigned short slot_use_count;
	unsigned short message_quota;
	unsigned short message_use_count;
	struct completion quota_event;
	int previous_tx_index;
};

struct vchiq_shared_state {
	/* A non-zero value here indicates that the content is valid. */
	int initialised;

	/* The first and last (inclusive) slots allocated to the owner. */
	int slot_first;
	int slot_last;

	/* The slot allocated to synchronous messages from the owner. */
	int slot_sync;

	/*
	 * Signalling this event indicates that the owner's slot handler
	 * thread should run.
	 */
	struct remote_event trigger;

	/*
	 * Indicates the byte position within the stream where the next message
	 * will be written. The least significant bits are an index into the
	 * slot. The next bits are the index of the slot in slot_queue.
	 */
	int tx_pos;

	/* This event should be signalled when a slot is recycled. */
	struct remote_event recycle;

	/* The slot_queue index where the next recycled slot will be written. */
	int slot_queue_recycle;

	/* This event should be signalled when a synchronous message is sent. */
	struct remote_event sync_trigger;

	/*
	 * This event should be signalled when a synchronous message has been
	 * released.
	 */
	struct remote_event sync_release;

	/* A circular buffer of slot indexes. */
	int slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE];

	/* Debugging state */
	int debug[DEBUG_MAX];
};
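
/*
 * Illustrative sketch (an assumption, not part of this header) of how a
 * stream position such as tx_pos decomposes, given a shared-state
 * pointer "shared": the low bits are the byte offset within the slot
 * and the remaining bits select an entry in slot_queue.
 *
 *	int offset    = pos & VCHIQ_SLOT_MASK;
 *	int queue_idx = (pos / VCHIQ_SLOT_SIZE) & VCHIQ_SLOT_QUEUE_MASK;
 *	int slot      = shared->slot_queue[queue_idx];
 */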

struct vchiq_slot_zero {
	int magic;
	short version;
	short version_min;
	int slot_zero_size;
	int slot_size;
	int max_slots;
	int max_slots_per_side;
	int platform_data[2];
	struct vchiq_shared_state master;
	struct vchiq_shared_state slave;
	struct vchiq_slot_info slots[VCHIQ_MAX_SLOTS];
};

struct vchiq_state {
	int id;
	int initialised;
	enum vchiq_connstate conn_state;
	short version_common;

	struct vchiq_shared_state *local;
	struct vchiq_shared_state *remote;
	struct vchiq_slot *slot_data;

	unsigned short default_slot_quota;
	unsigned short default_message_quota;

	/* Event indicating connect message received */
	struct completion connect;

	/* Mutex protecting services */
	struct mutex mutex;
	struct vchiq_instance **instance;

	/* Processes incoming messages */
	struct task_struct *slot_handler_thread;

	/* Processes recycled slots */
	struct task_struct *recycle_thread;

	/* Processes synchronous messages */
	struct task_struct *sync_thread;

	/* Local implementation of the trigger remote event */
	wait_queue_head_t trigger_event;

	/* Local implementation of the recycle remote event */
	wait_queue_head_t recycle_event;

	/* Local implementation of the sync trigger remote event */
	wait_queue_head_t sync_trigger_event;

	/* Local implementation of the sync release remote event */
	wait_queue_head_t sync_release_event;

	char *tx_data;
	char *rx_data;
	struct vchiq_slot_info *rx_info;

	struct mutex slot_mutex;

	struct mutex recycle_mutex;

	struct mutex sync_mutex;

	struct mutex bulk_transfer_mutex;

	/*
	 * Indicates the byte position within the stream from where the next
	 * message will be read. The least significant bits are an index into
	 * the slot. The next bits are the index of the slot in
	 * remote->slot_queue.
	 */
	int rx_pos;

	/*
	 * A cached copy of local->tx_pos. Only write to local->tx_pos, and read
	 * from remote->tx_pos.
	 */
	int local_tx_pos;

	/* The slot_queue index of the slot to become available next. */
	int slot_queue_available;

	/* A flag to indicate if any poll has been requested */
	int poll_needed;

	/* The index of the previous slot used for data messages. */
	int previous_data_index;

	/* The number of slots occupied by data messages. */
	unsigned short data_use_count;

	/* The maximum number of slots to be occupied by data messages. */
	unsigned short data_quota;

	/* An array of bit sets indicating which services must be polled. */
	atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)];

	/* The number of the first unused service */
	int unused_service;

	/* Signalled when a free slot becomes available. */
	struct completion slot_available_event;

	struct completion slot_remove_event;

	/* Signalled when a free data slot becomes available. */
	struct completion data_quota_event;

	struct state_stats_struct {
		int slot_stalls;
		int data_stalls;
		int ctrl_tx_count;
		int ctrl_rx_count;
		int error_count;
	} stats;

	struct vchiq_service __rcu *services[VCHIQ_MAX_SERVICES];
	struct vchiq_service_quota service_quotas[VCHIQ_MAX_SERVICES];
	struct vchiq_slot_info slot_info[VCHIQ_MAX_SLOTS];

	struct opaque_platform_state *platform_state;
};

struct bulk_waiter {
	struct vchiq_bulk *bulk;
	struct completion event;
	int actual;
};

struct vchiq_config {
	unsigned int max_msg_size;
	unsigned int bulk_threshold;	/* The message size above which it
					 * is better to use a bulk transfer
					 * (<= max_msg_size)
					 */
	unsigned int max_outstanding_bulks;
	unsigned int max_services;
	short version;      /* The version of VCHIQ */
	short version_min;  /* The minimum compatible version of VCHIQ */
};
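
/*
 * Minimal sketch (not part of this header) of using the reported
 * configuration, e.g. to decide between an ordinary message and a bulk
 * transfer based on bulk_threshold:
 *
 *	struct vchiq_config config;
 *
 *	vchiq_get_config(&config);
 *	if (size > config.bulk_threshold)
 *		... use a bulk transfer ...
 *	else
 *		... send an ordinary message ...
 */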

extern spinlock_t bulk_waiter_spinlock;

extern int vchiq_core_log_level;
extern int vchiq_core_msg_log_level;
extern int vchiq_sync_log_level;

extern struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES];

extern const char *
get_conn_state_name(enum vchiq_connstate conn_state);

extern struct vchiq_slot_zero *
vchiq_init_slots(void *mem_base, int mem_size);

extern int
vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero);

extern enum vchiq_status
vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance);

struct vchiq_service *
vchiq_add_service_internal(struct vchiq_state *state,
			   const struct vchiq_service_params_kernel *params,
			   int srvstate, struct vchiq_instance *instance,
			   void (*userdata_term)(void *userdata));

extern enum vchiq_status
vchiq_open_service_internal(struct vchiq_service *service, int client_id);

extern enum vchiq_status
vchiq_close_service_internal(struct vchiq_service *service, int close_recvd);

extern void
vchiq_terminate_service_internal(struct vchiq_service *service);

extern void
vchiq_free_service_internal(struct vchiq_service *service);

extern void
vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance);

extern void
remote_event_pollall(struct vchiq_state *state);

extern enum vchiq_status
vchiq_bulk_transfer(unsigned int handle, void *offset, void __user *uoffset,
		    int size, void *userdata, enum vchiq_bulk_mode mode,
		    enum vchiq_bulk_dir dir);

extern int
vchiq_dump_state(void *dump_context, struct vchiq_state *state);

extern int
vchiq_dump_service_state(void *dump_context, struct vchiq_service *service);

extern void
vchiq_loud_error_header(void);

extern void
vchiq_loud_error_footer(void);

extern void
request_poll(struct vchiq_state *state, struct vchiq_service *service,
	     int poll_type);

static inline struct vchiq_service *
handle_to_service(unsigned int handle)
{
	int idx = handle & (VCHIQ_MAX_SERVICES - 1);
	struct vchiq_state *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) &
		(VCHIQ_MAX_STATES - 1)];

	if (!state)
		return NULL;
	return rcu_dereference(state->services[idx]);
}
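
/*
 * handle_to_service() returns an RCU-protected pointer, so the caller
 * must be inside an RCU read-side critical section and must take a
 * reference before the section ends if the service is to be used
 * afterwards. Illustrative sketch (not part of this header):
 *
 *	rcu_read_lock();
 *	service = handle_to_service(handle);
 *	if (service && kref_get_unless_zero(&service->ref_count))
 *		... service remains valid after rcu_read_unlock() ...
 *	rcu_read_unlock();
 */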

extern struct vchiq_service *
find_service_by_handle(unsigned int handle);

extern struct vchiq_service *
find_service_by_port(struct vchiq_state *state, unsigned int localport);

extern struct vchiq_service *
find_service_for_instance(struct vchiq_instance *instance, unsigned int handle);

extern struct vchiq_service *
find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int handle);

extern struct vchiq_service *
__next_service_by_instance(struct vchiq_state *state,
			   struct vchiq_instance *instance,
			   int *pidx);

extern struct vchiq_service *
next_service_by_instance(struct vchiq_state *state,
			 struct vchiq_instance *instance,
			 int *pidx);

extern void
vchiq_service_get(struct vchiq_service *service);

extern void
vchiq_service_put(struct vchiq_service *service);

extern enum vchiq_status
vchiq_queue_message(unsigned int handle,
		    ssize_t (*copy_callback)(void *context, void *dest,
					     size_t offset, size_t maxsize),
		    void *context,
		    size_t size);
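
/*
 * Illustrative sketch (not part of this header) of a trivial
 * copy_callback for vchiq_queue_message(): it is called to copy up to
 * maxsize bytes of payload, starting at the given offset, into the
 * message slot, and returns the number of bytes copied (or a negative
 * error code).
 *
 *	static ssize_t copy_from_buf(void *context, void *dest,
 *				     size_t offset, size_t maxsize)
 *	{
 *		memcpy(dest, (char *)context + offset, maxsize);
 *		return maxsize;
 *	}
 */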

int vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset, void __user *uoffset,
			    int size, int dir);

void vchiq_complete_bulk(struct vchiq_bulk *bulk);

void remote_event_signal(struct remote_event *event);

int vchiq_dump(void *dump_context, const char *str, int len);

int vchiq_dump_platform_state(void *dump_context);

int vchiq_dump_platform_instances(void *dump_context);

int vchiq_dump_platform_service_state(void *dump_context, struct vchiq_service *service);

int vchiq_use_service_internal(struct vchiq_service *service);

int vchiq_release_service_internal(struct vchiq_service *service);

void vchiq_on_remote_use(struct vchiq_state *state);

void vchiq_on_remote_release(struct vchiq_state *state);

int vchiq_platform_init_state(struct vchiq_state *state);

enum vchiq_status vchiq_check_service(struct vchiq_service *service);

void vchiq_on_remote_use_active(struct vchiq_state *state);

enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state);

enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state);

void vchiq_platform_conn_state_changed(struct vchiq_state *state,
				       enum vchiq_connstate oldstate,
				       enum vchiq_connstate newstate);

void vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate);

void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem, size_t num_bytes);

enum vchiq_status vchiq_remove_service(unsigned int service);

int vchiq_get_client_id(unsigned int service);

void vchiq_get_config(struct vchiq_config *config);

int vchiq_set_service_option(unsigned int service, enum vchiq_service_option option, int value);

#endif