/*
 * msg_sm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * Implements upper edge functions for Bridge message module.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>

/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>

/* ----------------------------------- Others */
#include <dspbridge/io_sm.h>

/* ----------------------------------- This */
#include <_msg_sm.h>
#include <dspbridge/dspmsg.h>

/* ----------------------------------- Function Prototypes */
static int add_new_msg(struct list_head *msg_list);
static void delete_msg_mgr(struct msg_mgr *hmsg_mgr);
static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp);
static void free_msg_list(struct list_head *msg_list);

/*
 * ======== bridge_msg_create ========
 * Create an object to manage message queues. Only one of these objects
 * can exist per device object.
 */
int bridge_msg_create(struct msg_mgr **msg_man,
		      struct dev_object *hdev_obj,
		      msg_onexit msg_callback)
{
	struct msg_mgr *msg_mgr_obj;
	struct io_mgr *hio_mgr;
	int status = 0;

	if (!msg_man || !msg_callback || !hdev_obj)
		return -EFAULT;

	dev_get_io_mgr(hdev_obj, &hio_mgr);
	if (!hio_mgr)
		return -EFAULT;

	*msg_man = NULL;
	/* Allocate msg_ctrl manager object */
	msg_mgr_obj = kzalloc(sizeof(struct msg_mgr), GFP_KERNEL);
	if (!msg_mgr_obj)
		return -ENOMEM;

	msg_mgr_obj->on_exit = msg_callback;
	msg_mgr_obj->iomgr = hio_mgr;
	/* List of MSG_QUEUEs */
	INIT_LIST_HEAD(&msg_mgr_obj->queue_list);
	/*
	 * Queues of message frames for messages to the DSP. Message
	 * frames will only be added to the free queue when a
	 * msg_queue object is created.
	 */
	INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list);
	INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list);
	spin_lock_init(&msg_mgr_obj->msg_mgr_lock);

	/*
	 * Create an event to be used by bridge_msg_put() in waiting
	 * for an available free frame from the message manager.
	 */
	msg_mgr_obj->sync_event =
		kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (!msg_mgr_obj->sync_event) {
		kfree(msg_mgr_obj);
		return -ENOMEM;
	}
	sync_init_event(msg_mgr_obj->sync_event);

	*msg_man = msg_mgr_obj;

	return status;
}
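
/*
 * A minimal usage sketch (not part of the driver): the manager is created
 * once per device and paired with bridge_msg_delete(). The callback
 * signature is assumed to follow the msg_onexit typedef in msgdefs.h
 * (node handle plus exit status); the example_* names are hypothetical.
 */
#if 0	/* illustrative sketch only */
static void example_on_exit(void *node_handle, s32 node_status)
{
	/* Called from the I/O path when a node exits on the DSP */
}

static int example_setup_msg(struct dev_object *hdev_obj)
{
	struct msg_mgr *mgr;
	int err;

	err = bridge_msg_create(&mgr, hdev_obj, example_on_exit);
	if (err)
		return err;
	/* ... create queues and exchange messages ... */
	bridge_msg_delete(mgr);
	return 0;
}
#endif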

/*
 * ======== bridge_msg_create_queue ========
 * Create a msg_queue for sending/receiving messages to/from a node
 * on the DSP.
 */
int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr, struct msg_queue **msgq,
			    u32 msgq_id, u32 max_msgs, void *arg)
{
	u32 i;
	u32 num_allocated = 0;
	struct msg_queue *msg_q;
	int status = 0;

	if (!hmsg_mgr || msgq == NULL)
		return -EFAULT;

	*msgq = NULL;
	/* Allocate msg_queue object */
	msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL);
	if (!msg_q)
		return -ENOMEM;

	msg_q->max_msgs = max_msgs;
	msg_q->msg_mgr = hmsg_mgr;
	msg_q->arg = arg;	/* Node handle */
	msg_q->msgq_id = msgq_id;	/* Node env (not valid yet) */
	/* Queues of message frames for messages from the DSP */
	INIT_LIST_HEAD(&msg_q->msg_free_list);
	INIT_LIST_HEAD(&msg_q->msg_used_list);

	/* Create event that will be signalled when a message from
	 * the DSP is available. */
	msg_q->sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (!msg_q->sync_event) {
		status = -ENOMEM;
		goto out_err;
	}
	sync_init_event(msg_q->sync_event);

	/* Create a notification list for message ready notification. */
	msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
	if (!msg_q->ntfy_obj) {
		status = -ENOMEM;
		goto out_err;
	}
	ntfy_init(msg_q->ntfy_obj);

	/* Create events that will be used to synchronize cleanup
	 * when the object is deleted. sync_done will be set to
	 * unblock threads in MSG_Put() or MSG_Get(). sync_done_ack
	 * will be set by the unblocked thread to signal that it
	 * is unblocked and will no longer reference the object. */
	msg_q->sync_done = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (!msg_q->sync_done) {
		status = -ENOMEM;
		goto out_err;
	}
	sync_init_event(msg_q->sync_done);

	msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (!msg_q->sync_done_ack) {
		status = -ENOMEM;
		goto out_err;
	}
	sync_init_event(msg_q->sync_done_ack);

	/* Enter critical section */
	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	/* Initialize message frames and put in appropriate queues */
	for (i = 0; i < max_msgs && !status; i++) {
		status = add_new_msg(&hmsg_mgr->msg_free_list);
		if (!status) {
			num_allocated++;
			status = add_new_msg(&msg_q->msg_free_list);
		}
	}
	if (status) {
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		goto out_err;
	}

	list_add_tail(&msg_q->list_elem, &hmsg_mgr->queue_list);
	*msgq = msg_q;
	/* Signal that free frames are now available */
	if (!list_empty(&hmsg_mgr->msg_free_list))
		sync_set_event(hmsg_mgr->sync_event);

	/* Exit critical section */
	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);

	return 0;
out_err:
	delete_msg_queue(msg_q, num_allocated);
	return status;
}
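
/*
 * A minimal sketch of queue creation, assuming a NODE-layer caller: the
 * queue id passed at creation is a placeholder until the node environment
 * exists and bridge_msg_set_queue_id() is called. node_handle and the
 * frame count of 16 are hypothetical values for illustration.
 */
#if 0	/* illustrative sketch only */
static int example_create_node_queue(struct msg_mgr *mgr, void *node_handle,
				     struct msg_queue **q)
{
	int err;

	err = bridge_msg_create_queue(mgr, q, 0, 16, node_handle);
	if (err)
		return err;
	/* Later, once the node environment is known: */
	/* bridge_msg_set_queue_id(*q, node_env); */
	return 0;
}
#endif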

/*
 * ======== bridge_msg_delete ========
 * Delete a msg_ctrl manager allocated in bridge_msg_create().
 */
void bridge_msg_delete(struct msg_mgr *hmsg_mgr)
{
	if (hmsg_mgr)
		delete_msg_mgr(hmsg_mgr);
}

/*
 * ======== bridge_msg_delete_queue ========
 * Delete a msg_ctrl queue allocated in bridge_msg_create_queue.
 */
void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj)
{
	struct msg_mgr *hmsg_mgr;
	u32 io_msg_pend;

	if (!msg_queue_obj || !msg_queue_obj->msg_mgr)
		return;

	hmsg_mgr = msg_queue_obj->msg_mgr;
	msg_queue_obj->done = true;
	/* Unblock all threads blocked in MSG_Get() or MSG_Put(). */
	io_msg_pend = msg_queue_obj->io_msg_pend;
	while (io_msg_pend) {
		/* Unblock thread */
		sync_set_event(msg_queue_obj->sync_done);
		/* Wait for acknowledgement */
		sync_wait_on_event(msg_queue_obj->sync_done_ack, SYNC_INFINITE);
		io_msg_pend = msg_queue_obj->io_msg_pend;
	}
	/* Remove message queue from hmsg_mgr->queue_list */
	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	list_del(&msg_queue_obj->list_elem);
	/* Free the message queue object */
	delete_msg_queue(msg_queue_obj, msg_queue_obj->max_msgs);
	if (list_empty(&hmsg_mgr->msg_free_list))
		sync_reset_event(hmsg_mgr->sync_event);
	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
}

/*
 * ======== bridge_msg_get ========
 * Get a message from a msg_ctrl queue.
 */
int bridge_msg_get(struct msg_queue *msg_queue_obj,
		   struct dsp_msg *pmsg, u32 utimeout)
{
	struct msg_frame *msg_frame_obj;
	struct msg_mgr *hmsg_mgr;
	struct sync_object *syncs[2];
	u32 index;
	int status = 0;

	if (!msg_queue_obj || !pmsg)
		return -EFAULT;

	hmsg_mgr = msg_queue_obj->msg_mgr;

	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	/* If a message is already there, get it */
	if (!list_empty(&msg_queue_obj->msg_used_list)) {
		msg_frame_obj = list_first_entry(&msg_queue_obj->msg_used_list,
						 struct msg_frame, list_elem);
		list_del(&msg_frame_obj->list_elem);
		*pmsg = msg_frame_obj->msg_data.msg;
		list_add_tail(&msg_frame_obj->list_elem,
			      &msg_queue_obj->msg_free_list);
		if (list_empty(&msg_queue_obj->msg_used_list))
			sync_reset_event(msg_queue_obj->sync_event);
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		return 0;
	}

	if (msg_queue_obj->done) {
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		return -EPERM;
	}
	msg_queue_obj->io_msg_pend++;
	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);

	/*
	 * Wait til message is available, timeout, or done. We don't
	 * have to schedule the DPC, since the DSP will send messages
	 * when they are available.
	 */
	syncs[0] = msg_queue_obj->sync_event;
	syncs[1] = msg_queue_obj->sync_done;
	status = sync_wait_on_multiple_events(syncs, 2, utimeout, &index);

	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	if (msg_queue_obj->done) {
		msg_queue_obj->io_msg_pend--;
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		/*
		 * Signal that we're not going to access msg_queue_obj
		 * anymore, so it can be deleted.
		 */
		sync_set_event(msg_queue_obj->sync_done_ack);
		return -EPERM;
	}
	if (!status && !list_empty(&msg_queue_obj->msg_used_list)) {
		/* Get msg from used list */
		msg_frame_obj = list_first_entry(&msg_queue_obj->msg_used_list,
						 struct msg_frame, list_elem);
		list_del(&msg_frame_obj->list_elem);
		/* Copy message into pmsg and put frame on the free list */
		*pmsg = msg_frame_obj->msg_data.msg;
		list_add_tail(&msg_frame_obj->list_elem,
			      &msg_queue_obj->msg_free_list);
	}
	msg_queue_obj->io_msg_pend--;
	/* Keep the event set if there are still queued messages */
	if (!list_empty(&msg_queue_obj->msg_used_list))
		sync_set_event(msg_queue_obj->sync_event);

	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);

	return status;
}
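
/*
 * A minimal receive sketch, assuming utimeout is in milliseconds as with
 * the other sync waits in this module; example_drain is hypothetical. It
 * pulls messages until the queue empties (wait failure/timeout) or is
 * torn down (-EPERM).
 */
#if 0	/* illustrative sketch only */
static int example_drain(struct msg_queue *q)
{
	struct dsp_msg msg;
	int err;

	while ((err = bridge_msg_get(q, &msg, 100)) == 0) {
		/* ... process msg here ... */
	}
	return err;
}
#endif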

/*
 * ======== bridge_msg_put ========
 * Put a message onto a msg_ctrl queue.
 */
int bridge_msg_put(struct msg_queue *msg_queue_obj,
		   const struct dsp_msg *pmsg, u32 utimeout)
{
	struct msg_frame *msg_frame_obj;
	struct msg_mgr *hmsg_mgr;
	struct sync_object *syncs[2];
	u32 index;
	int status;

	if (!msg_queue_obj || !pmsg || !msg_queue_obj->msg_mgr)
		return -EFAULT;

	hmsg_mgr = msg_queue_obj->msg_mgr;

	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);

	/* If a message frame is available, use it */
	if (!list_empty(&hmsg_mgr->msg_free_list)) {
		msg_frame_obj = list_first_entry(&hmsg_mgr->msg_free_list,
						 struct msg_frame, list_elem);
		list_del(&msg_frame_obj->list_elem);
		msg_frame_obj->msg_data.msg = *pmsg;
		msg_frame_obj->msg_data.msgq_id = msg_queue_obj->msgq_id;
		list_add_tail(&msg_frame_obj->list_elem,
			      &hmsg_mgr->msg_used_list);
		hmsg_mgr->msgs_pending++;

		if (list_empty(&hmsg_mgr->msg_free_list))
			sync_reset_event(hmsg_mgr->sync_event);

		/* Release critical section before scheduling DPC */
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		/* Schedule a DPC, to do the actual data transfer: */
		iosm_schedule(hmsg_mgr->iomgr);
		return 0;
	}

	if (msg_queue_obj->done) {
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		return -EPERM;
	}
	msg_queue_obj->io_msg_pend++;

	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);

	/* Wait til a free message frame is available, timeout, or done */
	syncs[0] = hmsg_mgr->sync_event;
	syncs[1] = msg_queue_obj->sync_done;
	status = sync_wait_on_multiple_events(syncs, 2, utimeout, &index);

	/* Enter critical section */
	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	if (msg_queue_obj->done) {
		msg_queue_obj->io_msg_pend--;
		/* Exit critical section */
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		/*
		 * Signal that we're not going to access msg_queue_obj
		 * anymore, so it can be deleted.
		 */
		sync_set_event(msg_queue_obj->sync_done_ack);
		return -EPERM;
	}

	/*
	 * On wait timeout/failure, or if the free list is somehow still
	 * empty, drop the pending reference taken above before bailing out.
	 */
	if (status || list_empty(&hmsg_mgr->msg_free_list)) {
		msg_queue_obj->io_msg_pend--;
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		return status ? status : -EFAULT;
	}

	/* Get msg from free list */
	msg_frame_obj = list_first_entry(&hmsg_mgr->msg_free_list,
					 struct msg_frame, list_elem);
	/* Copy message into the frame and put it on the used list */
	list_del(&msg_frame_obj->list_elem);
	msg_frame_obj->msg_data.msg = *pmsg;
	msg_frame_obj->msg_data.msgq_id = msg_queue_obj->msgq_id;
	list_add_tail(&msg_frame_obj->list_elem, &hmsg_mgr->msg_used_list);
	hmsg_mgr->msgs_pending++;
	/* Schedule a DPC, to do the actual data transfer */
	iosm_schedule(hmsg_mgr->iomgr);

	msg_queue_obj->io_msg_pend--;
	/* Keep the event set if free frames remain */
	if (!list_empty(&hmsg_mgr->msg_free_list))
		sync_set_event(hmsg_mgr->sync_event);

	/* Exit critical section */
	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);

	return 0;
}
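
/*
 * A minimal send sketch; the field names follow struct dsp_msg as defined
 * in dbdefs.h (cmd/arg1/arg2 are an assumption if that layout differs),
 * and the 100 ms timeout is arbitrary. example_send is hypothetical.
 */
#if 0	/* illustrative sketch only */
static int example_send(struct msg_queue *q, u32 cmd, u32 arg1, u32 arg2)
{
	struct dsp_msg msg;

	msg.cmd = cmd;
	msg.arg1 = arg1;
	msg.arg2 = arg2;
	/* Blocks for a free frame; -EPERM means the queue is going away */
	return bridge_msg_put(q, &msg, 100);
}
#endif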

/*
 * ======== bridge_msg_register_notify ========
 */
int bridge_msg_register_notify(struct msg_queue *msg_queue_obj,
			       u32 event_mask, u32 notify_type,
			       struct dsp_notification *hnotification)
{
	int status = 0;

	if (!msg_queue_obj || !hnotification) {
		status = -EFAULT;
		goto func_end;
	}

	if (!(event_mask == DSP_NODEMESSAGEREADY || event_mask == 0)) {
		status = -EPERM;
		goto func_end;
	}

	if (notify_type != DSP_SIGNALEVENT) {
		status = -EBADR;
		goto func_end;
	}

	if (event_mask)
		status = ntfy_register(msg_queue_obj->ntfy_obj, hnotification,
				       event_mask, notify_type);
	else
		status = ntfy_unregister(msg_queue_obj->ntfy_obj,
					 hnotification);

	if (status == -EINVAL) {
		/* Not registered. Ok, since we couldn't have known. Node
		 * notifications are split between node state change handled
		 * by NODE, and message ready handled by msg_ctrl. */
		status = 0;
	}
func_end:
	return status;
}
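
/*
 * A minimal notification sketch, assuming the caller owns a
 * dsp_notification prepared by the upper layers; only the mask and type
 * this function accepts are used. example_watch is hypothetical.
 */
#if 0	/* illustrative sketch only */
static int example_watch(struct msg_queue *q,
			 struct dsp_notification *notification)
{
	/* Signal 'notification' whenever a DSP message becomes ready */
	return bridge_msg_register_notify(q, DSP_NODEMESSAGEREADY,
					  DSP_SIGNALEVENT, notification);
}
#endif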

/*
 * ======== bridge_msg_set_queue_id ========
 */
void bridge_msg_set_queue_id(struct msg_queue *msg_queue_obj, u32 msgq_id)
{
	/*
	 * A message queue must be created when a node is allocated,
	 * so that node_register_notify() can be called before the node
	 * is created. Since we don't know the node environment until the
	 * node is created, we need this function to set msg_queue_obj->msgq_id
	 * to the node environment, after the node is created.
	 */
	if (msg_queue_obj)
		msg_queue_obj->msgq_id = msgq_id;
}

/*
 * ======== add_new_msg ========
 * Must be called in message manager critical section.
 */
static int add_new_msg(struct list_head *msg_list)
{
	struct msg_frame *pmsg;

	pmsg = kzalloc(sizeof(struct msg_frame), GFP_ATOMIC);
	if (!pmsg)
		return -ENOMEM;

	list_add_tail(&pmsg->list_elem, msg_list);

	return 0;
}

/*
 * ======== delete_msg_mgr ========
 */
static void delete_msg_mgr(struct msg_mgr *hmsg_mgr)
{
	if (!hmsg_mgr)
		return;

	/* FIXME: free elements from queue_list? */
	free_msg_list(&hmsg_mgr->msg_free_list);
	free_msg_list(&hmsg_mgr->msg_used_list);
	kfree(hmsg_mgr->sync_event);
	kfree(hmsg_mgr);
}

/*
 * ======== delete_msg_queue ========
 */
static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp)
{
	struct msg_mgr *hmsg_mgr;
	struct msg_frame *pmsg, *tmp;
	u32 i;

	if (!msg_queue_obj || !msg_queue_obj->msg_mgr)
		return;

	hmsg_mgr = msg_queue_obj->msg_mgr;

	/* Pull num_to_dsp message frames off the Msg manager's free list */
	i = 0;
	list_for_each_entry_safe(pmsg, tmp, &hmsg_mgr->msg_free_list,
				 list_elem) {
		if (i++ >= num_to_dsp)
			break;
		list_del(&pmsg->list_elem);
		kfree(pmsg);
	}

	free_msg_list(&msg_queue_obj->msg_free_list);
	free_msg_list(&msg_queue_obj->msg_used_list);

	if (msg_queue_obj->ntfy_obj) {
		ntfy_delete(msg_queue_obj->ntfy_obj);
		kfree(msg_queue_obj->ntfy_obj);
	}

	kfree(msg_queue_obj->sync_event);
	kfree(msg_queue_obj->sync_done);
	kfree(msg_queue_obj->sync_done_ack);

	kfree(msg_queue_obj);
}

/*
 * ======== free_msg_list ========
 */
static void free_msg_list(struct list_head *msg_list)
{
	struct msg_frame *pmsg, *tmp;

	if (!msg_list)
		return;

	list_for_each_entry_safe(pmsg, tmp, msg_list, list_elem) {
		list_del(&pmsg->list_elem);
		kfree(pmsg);
	}
}