/*
 * msg_sm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * Implements upper edge functions for Bridge message module.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>
#include <linux/slab.h>

/*  ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/*  ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>

/*  ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>

/*  ----------------------------------- Others */
#include <dspbridge/io_sm.h>

/*  ----------------------------------- This */
#include <_msg_sm.h>
#include <dspbridge/dspmsg.h>
/*  ----------------------------------- Function Prototypes */
static int add_new_msg(struct list_head *msg_list);
static void delete_msg_mgr(struct msg_mgr *hmsg_mgr);
static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp);
static void free_msg_list(struct list_head *msg_list);

/*
 *  ======== bridge_msg_create ========
 *      Create an object to manage message queues. Only one of these objects
 *      can exist per device object.
 */
int bridge_msg_create(struct msg_mgr **msg_man,
			     struct dev_object *hdev_obj,
			     msg_onexit msg_callback)
{
	struct msg_mgr *msg_mgr_obj;
	struct io_mgr *hio_mgr;

	if (!msg_man || !msg_callback || !hdev_obj)
		return -EFAULT;

	dev_get_io_mgr(hdev_obj, &hio_mgr);
	if (!hio_mgr)
		return -EFAULT;

	*msg_man = NULL;
	/* Allocate msg_ctrl manager object */
	msg_mgr_obj = kzalloc(sizeof(struct msg_mgr), GFP_KERNEL);
	if (!msg_mgr_obj)
		return -ENOMEM;

	msg_mgr_obj->on_exit = msg_callback;
	msg_mgr_obj->iomgr = hio_mgr;
	/* List of MSG_QUEUEs */
	INIT_LIST_HEAD(&msg_mgr_obj->queue_list);
	/*
	 * Queues of message frames for messages to the DSP. Message
	 * frames will only be added to the free queue when a
	 * msg_queue object is created.
	 */
	INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list);
	INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list);
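	/*
	 * A single lock serializes all manipulation of the manager's and
	 * of each queue's message frame lists in this module.
	 */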
	spin_lock_init(&msg_mgr_obj->msg_mgr_lock);

	/*
	 * Create an event to be used by bridge_msg_put() in waiting
	 * for an available free frame from the message manager.
	 */
	msg_mgr_obj->sync_event =
		kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (!msg_mgr_obj->sync_event) {
		kfree(msg_mgr_obj);
		return -ENOMEM;
	}
	sync_init_event(msg_mgr_obj->sync_event);

	*msg_man = msg_mgr_obj;

	return 0;
}

/*
 *  ======== bridge_msg_create_queue ========
 *      Create a msg_queue for sending/receiving messages to/from a node
 *      on the DSP.
 */
int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr, struct msg_queue **msgq,
				u32 msgq_id, u32 max_msgs, void *arg)
{
	u32 i;
	u32 num_allocated = 0;
	struct msg_queue *msg_q;
	int status = 0;

	if (!hmsg_mgr || !msgq)
		return -EFAULT;

	*msgq = NULL;
	/* Allocate msg_queue object */
	msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL);
	if (!msg_q)
		return -ENOMEM;

	msg_q->max_msgs = max_msgs;
	msg_q->msg_mgr = hmsg_mgr;
	msg_q->arg = arg;	/* Node handle */
	msg_q->msgq_id = msgq_id;	/* Node env (not valid yet) */
	/* Queues of message frames for messages from the DSP */
	INIT_LIST_HEAD(&msg_q->msg_free_list);
	INIT_LIST_HEAD(&msg_q->msg_used_list);

	/*  Create event that will be signalled when a message from
	 *  the DSP is available. */
	msg_q->sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (!msg_q->sync_event) {
		status = -ENOMEM;
		goto out_err;
	}
	sync_init_event(msg_q->sync_event);

	/* Create a notification list for message ready notification. */
	msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
	if (!msg_q->ntfy_obj) {
		status = -ENOMEM;
		goto out_err;
	}
	ntfy_init(msg_q->ntfy_obj);

	/*  Create events that will be used to synchronize cleanup
	 *  when the object is deleted. sync_done will be set to
	 *  unblock threads in bridge_msg_put() or bridge_msg_get().
	 *  sync_done_ack will be set by the unblocked thread to signal
	 *  that it is unblocked and will no longer reference the object. */
	msg_q->sync_done = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (!msg_q->sync_done) {
		status = -ENOMEM;
		goto out_err;
	}
	sync_init_event(msg_q->sync_done);

	msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (!msg_q->sync_done_ack) {
		status = -ENOMEM;
		goto out_err;
	}
	sync_init_event(msg_q->sync_done_ack);

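	/*
	 * Shutdown handshake, see bridge_msg_delete_queue(): done is set
	 * and sync_done is signalled once for every thread counted in
	 * io_msg_pend; each unblocked thread decrements io_msg_pend and
	 * signals sync_done_ack before it stops touching this object.
	 */
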
	/* Enter critical section */
	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	/*
	 * Initialize message frames and put them in the appropriate
	 * queues: for each message, one frame goes on the manager's
	 * free list (to-DSP direction) and one on this queue's free
	 * list (from-DSP direction).
	 */
	for (i = 0; i < max_msgs && !status; i++) {
		status = add_new_msg(&hmsg_mgr->msg_free_list);
		if (!status) {
			num_allocated++;
			status = add_new_msg(&msg_q->msg_free_list);
		}
	}
	if (status) {
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		goto out_err;
	}

	list_add_tail(&msg_q->list_elem, &hmsg_mgr->queue_list);
	*msgq = msg_q;
	/* Signal that free frames are now available */
	if (!list_empty(&hmsg_mgr->msg_free_list))
		sync_set_event(hmsg_mgr->sync_event);

	/* Exit critical section */
	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);

	return 0;
out_err:
	delete_msg_queue(msg_q, num_allocated);
	return status;
}

/*
 *  ======== bridge_msg_delete ========
 *      Delete a msg_ctrl manager allocated in bridge_msg_create().
 */
void bridge_msg_delete(struct msg_mgr *hmsg_mgr)
{
	if (hmsg_mgr)
		delete_msg_mgr(hmsg_mgr);
}

/*
 *  ======== bridge_msg_delete_queue ========
 *      Delete a msg_ctrl queue allocated in bridge_msg_create_queue().
 */
void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj)
{
	struct msg_mgr *hmsg_mgr;
	u32 io_msg_pend;

	if (!msg_queue_obj || !msg_queue_obj->msg_mgr)
		return;

	hmsg_mgr = msg_queue_obj->msg_mgr;
	msg_queue_obj->done = true;
	/* Unblock all threads blocked in bridge_msg_get() or bridge_msg_put() */
	io_msg_pend = msg_queue_obj->io_msg_pend;
	while (io_msg_pend) {
		/* Unblock thread */
		sync_set_event(msg_queue_obj->sync_done);
		/* Wait for acknowledgement */
		sync_wait_on_event(msg_queue_obj->sync_done_ack, SYNC_INFINITE);
		io_msg_pend = msg_queue_obj->io_msg_pend;
	}
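	/* All unblocked waiters have acknowledged */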
	/* Remove message queue from hmsg_mgr->queue_list */
	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	list_del(&msg_queue_obj->list_elem);
	/* Free the message queue object */
	delete_msg_queue(msg_queue_obj, msg_queue_obj->max_msgs);
	if (list_empty(&hmsg_mgr->msg_free_list))
		sync_reset_event(hmsg_mgr->sync_event);
	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
}

/*
 *  ======== bridge_msg_get ========
 *      Get a message from a msg_ctrl queue.
 */
int bridge_msg_get(struct msg_queue *msg_queue_obj,
			  struct dsp_msg *pmsg, u32 utimeout)
{
	struct msg_frame *msg_frame_obj;
	struct msg_mgr *hmsg_mgr;
	struct sync_object *syncs[2];
	u32 index;
	int status = 0;

	if (!msg_queue_obj || !pmsg || !msg_queue_obj->msg_mgr)
		return -EFAULT;

	hmsg_mgr = msg_queue_obj->msg_mgr;

	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	/* If a message is already there, get it */
	if (!list_empty(&msg_queue_obj->msg_used_list)) {
		msg_frame_obj = list_first_entry(&msg_queue_obj->msg_used_list,
				struct msg_frame, list_elem);
		list_del(&msg_frame_obj->list_elem);
		*pmsg = msg_frame_obj->msg_data.msg;
		list_add_tail(&msg_frame_obj->list_elem,
				&msg_queue_obj->msg_free_list);
		if (list_empty(&msg_queue_obj->msg_used_list))
			sync_reset_event(msg_queue_obj->sync_event);
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		return 0;
	}

	if (msg_queue_obj->done) {
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		return -EPERM;
	}
	msg_queue_obj->io_msg_pend++;
	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);

	/*
	 * Wait until a message is available, a timeout occurs, or the
	 * queue is marked done. We don't have to schedule the DPC, since
	 * the DSP will send messages when they are available.
	 */
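	/* syncs[0]: message available; syncs[1]: queue is being deleted */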
	syncs[0] = msg_queue_obj->sync_event;
	syncs[1] = msg_queue_obj->sync_done;
	status = sync_wait_on_multiple_events(syncs, 2, utimeout, &index);

	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	if (msg_queue_obj->done) {
		msg_queue_obj->io_msg_pend--;
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		/*
		 * Signal that we're not going to access msg_queue_obj
		 * anymore, so it can be deleted.
		 */
		sync_set_event(msg_queue_obj->sync_done_ack);
		return -EPERM;
	}
	if (!status && !list_empty(&msg_queue_obj->msg_used_list)) {
		/* Get msg from used list */
		msg_frame_obj = list_first_entry(&msg_queue_obj->msg_used_list,
				struct msg_frame, list_elem);
		list_del(&msg_frame_obj->list_elem);
		/* Copy message into pmsg and put frame on the free list */
		*pmsg = msg_frame_obj->msg_data.msg;
		list_add_tail(&msg_frame_obj->list_elem,
				&msg_queue_obj->msg_free_list);
	}
	msg_queue_obj->io_msg_pend--;
	/* Keep the event set if messages remain, so the next caller won't block */
	if (!list_empty(&msg_queue_obj->msg_used_list))
		sync_set_event(msg_queue_obj->sync_event);

	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);

	return status;
}

/*
 *  ======== bridge_msg_put ========
 *      Put a message onto a msg_ctrl queue.
 */
int bridge_msg_put(struct msg_queue *msg_queue_obj,
			  const struct dsp_msg *pmsg, u32 utimeout)
{
	struct msg_frame *msg_frame_obj;
	struct msg_mgr *hmsg_mgr;
	struct sync_object *syncs[2];
	u32 index;
	int status;

	if (!msg_queue_obj || !pmsg || !msg_queue_obj->msg_mgr)
		return -EFAULT;

	hmsg_mgr = msg_queue_obj->msg_mgr;

	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);

	/* If a message frame is available, use it */
	if (!list_empty(&hmsg_mgr->msg_free_list)) {
		msg_frame_obj = list_first_entry(&hmsg_mgr->msg_free_list,
				struct msg_frame, list_elem);
		list_del(&msg_frame_obj->list_elem);
		msg_frame_obj->msg_data.msg = *pmsg;
		msg_frame_obj->msg_data.msgq_id =
			msg_queue_obj->msgq_id;
		list_add_tail(&msg_frame_obj->list_elem,
				&hmsg_mgr->msg_used_list);
		hmsg_mgr->msgs_pending++;

		if (list_empty(&hmsg_mgr->msg_free_list))
			sync_reset_event(hmsg_mgr->sync_event);

		/* Release critical section before scheduling DPC */
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		/* Schedule a DPC, to do the actual data transfer: */
		iosm_schedule(hmsg_mgr->iomgr);
		return 0;
	}

	if (msg_queue_obj->done) {
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		return -EPERM;
	}
	msg_queue_obj->io_msg_pend++;

	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);

	/* Wait until a free message frame is available, a timeout occurs,
	 * or the queue is marked done */
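	/* syncs[0]: free frame available; syncs[1]: queue is being deleted */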
	syncs[0] = hmsg_mgr->sync_event;
	syncs[1] = msg_queue_obj->sync_done;
	status = sync_wait_on_multiple_events(syncs, 2, utimeout, &index);
	if (status) {
		/*
		 * Timed out or failed: balance the io_msg_pend count taken
		 * above, and complete the shutdown handshake if cleanup
		 * began while we were waiting, so that
		 * bridge_msg_delete_queue() does not block forever on
		 * sync_done_ack.
		 */
		spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
		msg_queue_obj->io_msg_pend--;
		if (msg_queue_obj->done) {
			spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
			sync_set_event(msg_queue_obj->sync_done_ack);
			return -EPERM;
		}
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		return status;
	}

	/* Enter critical section */
	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	if (msg_queue_obj->done) {
		msg_queue_obj->io_msg_pend--;
		/* Exit critical section */
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		/*
		 * Signal that we're not going to access msg_queue_obj
		 * anymore, so it can be deleted.
		 */
		sync_set_event(msg_queue_obj->sync_done_ack);
		return -EPERM;
	}

	if (list_empty(&hmsg_mgr->msg_free_list)) {
		/* Balance the io_msg_pend count before bailing out */
		msg_queue_obj->io_msg_pend--;
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		return -EFAULT;
	}

	/* Get msg frame from free list */
	msg_frame_obj = list_first_entry(&hmsg_mgr->msg_free_list,
			struct msg_frame, list_elem);
	/* Copy the message into the frame and put it on the used list */
	list_del(&msg_frame_obj->list_elem);
	msg_frame_obj->msg_data.msg = *pmsg;
	msg_frame_obj->msg_data.msgq_id = msg_queue_obj->msgq_id;
	list_add_tail(&msg_frame_obj->list_elem, &hmsg_mgr->msg_used_list);
	hmsg_mgr->msgs_pending++;

	msg_queue_obj->io_msg_pend--;
	/* Keep the event set if free frames remain, so the next caller won't block */
	if (!list_empty(&hmsg_mgr->msg_free_list))
		sync_set_event(hmsg_mgr->sync_event);

	/* Release critical section before scheduling DPC */
	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
	/* Schedule a DPC, to do the actual data transfer */
	iosm_schedule(hmsg_mgr->iomgr);

	return 0;
}

/*
 *  ======== bridge_msg_register_notify ========
 */
int bridge_msg_register_notify(struct msg_queue *msg_queue_obj,
				   u32 event_mask, u32 notify_type,
				   struct dsp_notification *hnotification)
{
	int status = 0;

	if (!msg_queue_obj || !hnotification) {
		status = -EFAULT;
		goto func_end;
	}

	if (event_mask != DSP_NODEMESSAGEREADY && event_mask != 0) {
		status = -EPERM;
		goto func_end;
	}

	if (notify_type != DSP_SIGNALEVENT) {
		status = -EBADR;
		goto func_end;
	}

	if (event_mask)
		status = ntfy_register(msg_queue_obj->ntfy_obj, hnotification,
						event_mask, notify_type);
	else
		status = ntfy_unregister(msg_queue_obj->ntfy_obj,
							hnotification);

	if (status == -EINVAL) {
		/*  Not registered. Ok, since we couldn't have known. Node
		 *  notifications are split between node state change handled
		 *  by NODE, and message ready handled by msg_ctrl. */
		status = 0;
	}
func_end:
	return status;
}

/*
 *  ======== bridge_msg_set_queue_id ========
 */
void bridge_msg_set_queue_id(struct msg_queue *msg_queue_obj, u32 msgq_id)
{
	/*
	 *  A message queue must be created when a node is allocated,
	 *  so that node_register_notify() can be called before the node
	 *  is created. Since we don't know the node environment until the
	 *  node is created, we need this function to set msg_queue_obj->msgq_id
	 *  to the node environment, after the node is created.
	 */
	if (msg_queue_obj)
		msg_queue_obj->msgq_id = msgq_id;
}

/*
 *  ======== add_new_msg ========
 *      Must be called with the message manager's spinlock held.
 */
static int add_new_msg(struct list_head *msg_list)
{
	struct msg_frame *pmsg;

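	/* GFP_ATOMIC: callers hold the manager's spinlock with BHs disabled */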
	pmsg = kzalloc(sizeof(struct msg_frame), GFP_ATOMIC);
	if (!pmsg)
		return -ENOMEM;

	list_add_tail(&pmsg->list_elem, msg_list);

	return 0;
}

/*
 *  ======== delete_msg_mgr ========
 */
static void delete_msg_mgr(struct msg_mgr *hmsg_mgr)
{
	if (!hmsg_mgr)
		return;

	/* FIXME: free elements from queue_list? */
	free_msg_list(&hmsg_mgr->msg_free_list);
	free_msg_list(&hmsg_mgr->msg_used_list);
	kfree(hmsg_mgr->sync_event);
	kfree(hmsg_mgr);
}

/*
 *  ======== delete_msg_queue ========
 */
static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp)
{
	struct msg_mgr *hmsg_mgr;
	struct msg_frame *pmsg, *tmp;
	u32 i;

	if (!msg_queue_obj || !msg_queue_obj->msg_mgr)
		return;

	hmsg_mgr = msg_queue_obj->msg_mgr;

	/*
	 * Pull off the num_to_dsp message frames this queue contributed
	 * to the manager's free list and free them. Check the count
	 * before freeing, so that exactly num_to_dsp frames are removed.
	 */
	i = 0;
	list_for_each_entry_safe(pmsg, tmp, &hmsg_mgr->msg_free_list,
			list_elem) {
		if (i++ >= num_to_dsp)
			break;
		list_del(&pmsg->list_elem);
		kfree(pmsg);
	}

	free_msg_list(&msg_queue_obj->msg_free_list);
	free_msg_list(&msg_queue_obj->msg_used_list);

	if (msg_queue_obj->ntfy_obj) {
		ntfy_delete(msg_queue_obj->ntfy_obj);
		kfree(msg_queue_obj->ntfy_obj);
	}

	kfree(msg_queue_obj->sync_event);
	kfree(msg_queue_obj->sync_done);
	kfree(msg_queue_obj->sync_done_ack);

	kfree(msg_queue_obj);
}

/*
 *  ======== free_msg_list ========
 */
static void free_msg_list(struct list_head *msg_list)
{
	struct msg_frame *pmsg, *tmp;

	if (!msg_list)
		return;

	list_for_each_entry_safe(pmsg, tmp, msg_list, list_elem) {
		list_del(&pmsg->list_elem);
		kfree(pmsg);
	}
}