/*
 * chnl_sm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * Implements upper edge functions for Bridge driver channel module.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

/*
 * The lower edge functions must be implemented by the Bridge driver
 * writer, and are declared in chnl_sm.h.
 *
 * Care is taken in this code to prevent simultaneous access to channel
 * queues from
 * 1. Threads.
 * 2. io_dpc(), scheduled from the io_isr() as an event.
 *
 * This is done primarily by:
 * - Semaphores.
 * - state flags in the channel object; and
 * - ensuring the IO_Dispatch() routine, which is called from both
 *   CHNL_AddIOReq() and the DPC (if implemented), is not re-entered.
 *
 * Channel Invariant:
 * There is an important invariant condition which must be maintained per
 * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
 * which may cause timeouts and/or failure of function sync_wait_on_event.
 * This invariant condition is:
 *
 * list_empty(&pchnl->io_completions) ==> pchnl->sync_event is reset
 * and
 * !list_empty(&pchnl->io_completions) ==> pchnl->sync_event is set.
 */
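
/*
 * Illustrative sketch only (not part of the driver): the invariant above
 * could be spot-checked with a helper of the following shape. sync_is_set()
 * is hypothetical -- the real sync API only exposes sync_set_event(),
 * sync_reset_event() and sync_wait_on_event():
 *
 *	static void assert_chnl_invariant(struct chnl_object *pchnl)
 *	{
 *		if (list_empty(&pchnl->io_completions))
 *			DBC_ASSERT(!sync_is_set(pchnl->sync_event));
 *		else
 *			DBC_ASSERT(sync_is_set(pchnl->sync_event));
 *	}
 */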

#include <linux/types.h>

/* ----------------------------------- OS */
#include <dspbridge/host_os.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>

/* ----------------------------------- Bridge Driver */
#include <dspbridge/dspdefs.h>
#include <dspbridge/dspchnl.h>
#include "_tiomap.h"

/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>

/* ----------------------------------- Others */
#include <dspbridge/io_sm.h>

/* ----------------------------------- Define for This */
#define USERMODE_ADDR	PAGE_OFFSET

#define MAILBOX_IRQ	INT_MAIL_MPU_IRQ

/* ----------------------------------- Function Prototypes */
static int create_chirp_list(struct list_head *list, u32 chirps);

static void free_chirp_list(struct list_head *list);

static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
			       u32 *chnl);

/*
 * ======== bridge_chnl_add_io_req ========
 * Enqueue an I/O request for data transfer on a channel to the DSP.
 * The direction (mode) is specified in the channel object. Note the DSP
 * address is specified for channels opened in direct I/O mode.
 */
int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *host_buf,
			   u32 byte_size, u32 buf_size,
			   u32 dw_dsp_addr, u32 dw_arg)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj = NULL;
	struct bridge_dev_context *dev_ctxt;
	struct dev_object *dev_obj;
	u8 dw_state;
	bool is_eos;
	struct chnl_mgr *chnl_mgr_obj = pchnl->chnl_mgr_obj;
	u8 *host_sys_buf = NULL;
	bool sched_dpc = false;
	u16 mb_val = 0;

	is_eos = (byte_size == 0);

	/* Validate args */
	if (!host_buf || !pchnl)
		return -EFAULT;

	if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode))
		return -EPERM;

	/*
	 * Check the channel state: only queue chirp if channel state
	 * allows it.
	 */
	dw_state = pchnl->state;
	if (dw_state != CHNL_STATEREADY) {
		if (dw_state & CHNL_STATECANCEL)
			return -ECANCELED;
		if ((dw_state & CHNL_STATEEOS) &&
		    CHNL_IS_OUTPUT(pchnl->chnl_mode))
			return -EPIPE;
		/* No other possible states left */
		DBC_ASSERT(0);
	}

	dev_obj = dev_get_first();
	dev_get_bridge_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		return -EFAULT;

	if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && host_buf) {
		if (!(host_buf < (void *)USERMODE_ADDR)) {
			host_sys_buf = host_buf;
			goto func_cont;
		}
		/* if addr in user mode, then copy to kernel space */
		host_sys_buf = kmalloc(buf_size, GFP_KERNEL);
		if (host_sys_buf == NULL)
			return -ENOMEM;

		if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			status = copy_from_user(host_sys_buf, host_buf,
						buf_size);
			if (status) {
				kfree(host_sys_buf);
				host_sys_buf = NULL;
				return -EFAULT;
			}
		}
	}
func_cont:
	/* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY
	 * channels. DPCCS is held to avoid race conditions with PCPY channels.
	 * If DPC is scheduled in process context (iosm_schedule) and any
	 * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
	 * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (pchnl->chnl_type == CHNL_PCPY) {
		/* This is a processor-copy channel. */
		if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			/* Check buffer size on output channels for fit. */
			if (byte_size > io_buf_size(
					pchnl->chnl_mgr_obj->iomgr)) {
				status = -EINVAL;
				goto out;
			}
		}
	}

	/* Get a free chirp: */
	if (list_empty(&pchnl->free_packets_list)) {
		status = -EIO;
		goto out;
	}
	chnl_packet_obj = list_first_entry(&pchnl->free_packets_list,
					   struct chnl_irp, link);
	list_del(&chnl_packet_obj->link);

	/* Enqueue the chirp on the chnl's IORequest queue: */
	chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
		host_buf;
	if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)
		chnl_packet_obj->host_sys_buf = host_sys_buf;

	/*
	 * Note: for dma chans dw_dsp_addr contains dsp address
	 * of SM buffer.
	 */
	DBC_ASSERT(chnl_mgr_obj->word_size != 0);
	/* DSP address */
	chnl_packet_obj->dsp_tx_addr = dw_dsp_addr / chnl_mgr_obj->word_size;
	chnl_packet_obj->byte_size = byte_size;
	chnl_packet_obj->buf_size = buf_size;
	/* Only valid for output channel */
	chnl_packet_obj->arg = dw_arg;
	chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS :
				   CHNL_IOCSTATCOMPLETE);
	list_add_tail(&chnl_packet_obj->link, &pchnl->io_requests);
	pchnl->cio_reqs++;
	DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets);
	/*
	 * If end of stream, update the channel state to prevent
	 * more IOR's.
	 */
	if (is_eos)
		pchnl->state |= CHNL_STATEEOS;

	/* Legacy DSM Processor-Copy */
	DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY);
	/* Request IO from the DSP */
	io_request_chnl(chnl_mgr_obj->iomgr, pchnl,
			(CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
			 IO_OUTPUT), &mb_val);
	sched_dpc = true;
out:
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	if (mb_val != 0)
		sm_interrupt_dsp(dev_ctxt, mb_val);

	/* Schedule a DPC, to do the actual data transfer */
	if (sched_dpc)
		iosm_schedule(chnl_mgr_obj->iomgr);

	return status;
}
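
/*
 * Usage sketch (illustrative only, not called anywhere in this file): a
 * typical synchronous transfer enqueues one buffer and then waits for its
 * completion record. The chnl, buf, len and timeout variables are assumed
 * to come from the caller; error handling is elided:
 *
 *	struct chnl_ioc ioc;
 *	int err;
 *
 *	err = bridge_chnl_add_io_req(chnl, buf, len, len, 0, 0);
 *	if (!err)
 *		err = bridge_chnl_get_ioc(chnl, timeout, &ioc);
 *	if (!err && (ioc.status & CHNL_IOCSTATTIMEOUT))
 *		err = -ETIMEDOUT;
 */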

/*
 * ======== bridge_chnl_cancel_io ========
 * Return all I/O requests to the client which have not yet been
 * transferred. The channel's I/O completion object is
 * signalled, and all the I/O requests are queued as IOC's, with the
 * status field set to CHNL_IOCSTATCANCEL.
 * This call is typically used in abort situations, and is a prelude to
 * chnl_close().
 */
int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
{
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	u32 chnl_id = -1;
	s8 chnl_mode;
	struct chnl_irp *chirp, *tmp;
	struct chnl_mgr *chnl_mgr_obj = NULL;

	/* Check args: */
	if (!pchnl || !pchnl->chnl_mgr_obj)
		return -EFAULT;

	chnl_id = pchnl->chnl_id;
	chnl_mode = pchnl->chnl_mode;
	chnl_mgr_obj = pchnl->chnl_mgr_obj;

	/* Mark this channel as cancelled, to prevent further IORequests
	 * or dispatching. */
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);

	pchnl->state |= CHNL_STATECANCEL;

	if (list_empty(&pchnl->io_requests)) {
		spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
		return 0;
	}

	if (pchnl->chnl_type == CHNL_PCPY) {
		/* Indicate we have no more buffers available for transfer: */
		if (CHNL_IS_INPUT(pchnl->chnl_mode)) {
			io_cancel_chnl(chnl_mgr_obj->iomgr, chnl_id);
		} else {
			/* Record that we no longer have output buffers
			 * available: */
			chnl_mgr_obj->output_mask &= ~(1 << chnl_id);
		}
	}
	/* Move all IOR's to IOC queue: */
	list_for_each_entry_safe(chirp, tmp, &pchnl->io_requests, link) {
		list_del(&chirp->link);
		chirp->byte_size = 0;
		chirp->status |= CHNL_IOCSTATCANCEL;
		list_add_tail(&chirp->link, &pchnl->io_completions);
		pchnl->cio_cs++;
		pchnl->cio_reqs--;
		DBC_ASSERT(pchnl->cio_reqs >= 0);
	}

	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);

	return 0;
}

/*
 * ======== bridge_chnl_close ========
 * Purpose:
 *     Ensures all pending I/O on this channel is cancelled, discards all
 *     queued I/O completion notifications, then frees the resources
 *     allocated for this channel, and makes the corresponding logical
 *     channel id available for subsequent use.
 */
int bridge_chnl_close(struct chnl_object *chnl_obj)
{
	int status;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;

	/* Check args: */
	if (!pchnl)
		return -EFAULT;
	/* Cancel IO: this ensures no further IO requests or notifications */
	status = bridge_chnl_cancel_io(chnl_obj);
	if (status)
		return status;
	/* Assert I/O on this channel is now cancelled: Protects from io_dpc */
	DBC_ASSERT((pchnl->state & CHNL_STATECANCEL));
	/* Invalidate channel object: Protects from CHNL_GetIOCompletion() */
	/* Free the slot in the channel manager: */
	pchnl->chnl_mgr_obj->channels[pchnl->chnl_id] = NULL;
	spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	pchnl->chnl_mgr_obj->open_channels -= 1;
	spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	if (pchnl->ntfy_obj) {
		ntfy_delete(pchnl->ntfy_obj);
		kfree(pchnl->ntfy_obj);
		pchnl->ntfy_obj = NULL;
	}
	/* Reset channel event: (NOTE: user_event freed in user context) */
	if (pchnl->sync_event) {
		sync_reset_event(pchnl->sync_event);
		kfree(pchnl->sync_event);
		pchnl->sync_event = NULL;
	}
	/* Free I/O request and I/O completion queues: */
	free_chirp_list(&pchnl->io_completions);
	pchnl->cio_cs = 0;

	free_chirp_list(&pchnl->io_requests);
	pchnl->cio_reqs = 0;

	free_chirp_list(&pchnl->free_packets_list);

	/* Release channel object. */
	kfree(pchnl);

	return status;
}

/*
 * ======== bridge_chnl_create ========
 * Create a channel manager object, responsible for opening new channels
 * and closing old ones for a given board.
 */
int bridge_chnl_create(struct chnl_mgr **channel_mgr,
		       struct dev_object *hdev_obj,
		       const struct chnl_mgrattrs *mgr_attrts)
{
	int status = 0;
	struct chnl_mgr *chnl_mgr_obj = NULL;
	u8 max_channels;

	/* Check DBC requirements: */
	DBC_REQUIRE(channel_mgr != NULL);
	DBC_REQUIRE(mgr_attrts != NULL);
	DBC_REQUIRE(mgr_attrts->max_channels > 0);
	DBC_REQUIRE(mgr_attrts->max_channels <= CHNL_MAXCHANNELS);
	DBC_REQUIRE(mgr_attrts->word_size != 0);

	/* Allocate channel manager object */
	chnl_mgr_obj = kzalloc(sizeof(struct chnl_mgr), GFP_KERNEL);
	if (chnl_mgr_obj) {
		/*
		 * The max_channels attr must equal the # of supported chnls
		 * for each transport (# chnls for PCPY = DDMA = ZCPY): i.e.
		 * mgr_attrts->max_channels = CHNL_MAXCHANNELS =
		 * DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS.
		 */
		DBC_ASSERT(mgr_attrts->max_channels == CHNL_MAXCHANNELS);
		max_channels = CHNL_MAXCHANNELS + CHNL_MAXCHANNELS * CHNL_PCPY;
		/* Create array of channels */
		chnl_mgr_obj->channels = kzalloc(sizeof(struct chnl_object *)
						 * max_channels, GFP_KERNEL);
		if (chnl_mgr_obj->channels) {
			/* Initialize chnl_mgr object */
			chnl_mgr_obj->type = CHNL_TYPESM;
			chnl_mgr_obj->word_size = mgr_attrts->word_size;
			/* Total # chnls supported */
			chnl_mgr_obj->max_channels = max_channels;
			chnl_mgr_obj->open_channels = 0;
			chnl_mgr_obj->output_mask = 0;
			chnl_mgr_obj->last_output = 0;
			chnl_mgr_obj->dev_obj = hdev_obj;
			spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock);
		} else {
			status = -ENOMEM;
		}
	} else {
		status = -ENOMEM;
	}

	if (status) {
		bridge_chnl_destroy(chnl_mgr_obj);
		*channel_mgr = NULL;
	} else {
		/* Return channel manager object to caller... */
		*channel_mgr = chnl_mgr_obj;
	}
	return status;
}
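
/*
 * Usage sketch (illustrative only): creating and destroying a channel
 * manager around a device object. The hdev_obj variable is assumed to come
 * from the platform manager; the attribute values follow the DBC
 * requirements checked above, and word_size = 2 is only an example value
 * (16-bit DSP words):
 *
 *	struct chnl_mgrattrs attrs = {
 *		.max_channels = CHNL_MAXCHANNELS,
 *		.word_size = 2,
 *	};
 *	struct chnl_mgr *mgr;
 *
 *	if (!bridge_chnl_create(&mgr, hdev_obj, &attrs)) {
 *		... open and use channels ...
 *		bridge_chnl_destroy(mgr);
 *	}
 */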

/*
 * ======== bridge_chnl_destroy ========
 * Purpose:
 *     Close all open channels, and destroy the channel manager.
 */
int bridge_chnl_destroy(struct chnl_mgr *hchnl_mgr)
{
	int status = 0;
	struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
	u32 chnl_id;

	if (hchnl_mgr) {
		/* Close all open channels: */
		for (chnl_id = 0; chnl_id < chnl_mgr_obj->max_channels;
		     chnl_id++) {
			status =
			    bridge_chnl_close(chnl_mgr_obj->channels
					      [chnl_id]);
			if (status)
				dev_dbg(bridge, "%s: Error status 0x%x\n",
					__func__, status);
		}

		/* Free channel manager object: */
		kfree(chnl_mgr_obj->channels);

		/* Set hchnl_mgr to NULL in device object. */
		dev_set_chnl_mgr(chnl_mgr_obj->dev_obj, NULL);
		/* Free this Chnl Mgr object: */
		kfree(hchnl_mgr);
	} else {
		status = -EFAULT;
	}
	return status;
}

/*
 * ======== bridge_chnl_flush_io ========
 * Purpose:
 *     Flushes all the outstanding data requests on a channel.
 */
int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	s8 chnl_mode = -1;
	struct chnl_mgr *chnl_mgr_obj;
	struct chnl_ioc chnl_ioc_obj;

	/* Check args: */
	if (pchnl) {
		if ((timeout == CHNL_IOCNOWAIT)
		    && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			status = -EINVAL;
		} else {
			chnl_mode = pchnl->chnl_mode;
			chnl_mgr_obj = pchnl->chnl_mgr_obj;
		}
	} else {
		status = -EFAULT;
	}
	if (!status) {
		/* Note: Currently, if another thread continues to add IO
		 * requests to this channel, this function will continue to
		 * flush all such queued IO requests. */
		if (CHNL_IS_OUTPUT(chnl_mode)
		    && (pchnl->chnl_type == CHNL_PCPY)) {
			/* Wait for IO completions, up to the specified
			 * timeout: */
			while (!list_empty(&pchnl->io_requests) && !status) {
				status = bridge_chnl_get_ioc(chnl_obj,
						timeout, &chnl_ioc_obj);
				if (status)
					continue;

				if (chnl_ioc_obj.status & CHNL_IOCSTATTIMEOUT)
					status = -ETIMEDOUT;
			}
		} else {
			status = bridge_chnl_cancel_io(chnl_obj);
			/* Now, leave the channel in the ready state: */
			pchnl->state &= ~CHNL_STATECANCEL;
		}
	}
	DBC_ENSURE(status || list_empty(&pchnl->io_requests));
	return status;
}

/*
 * ======== bridge_chnl_get_info ========
 * Purpose:
 *     Retrieve information related to a channel.
 */
int bridge_chnl_get_info(struct chnl_object *chnl_obj,
			 struct chnl_info *channel_info)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;

	if (channel_info != NULL) {
		if (pchnl) {
			/* Return the requested information: */
			channel_info->chnl_mgr = pchnl->chnl_mgr_obj;
			channel_info->event_obj = pchnl->user_event;
			channel_info->cnhl_id = pchnl->chnl_id;
			channel_info->mode = pchnl->chnl_mode;
			channel_info->bytes_tx = pchnl->bytes_moved;
			channel_info->process = pchnl->process;
			channel_info->sync_event = pchnl->sync_event;
			channel_info->cio_cs = pchnl->cio_cs;
			channel_info->cio_reqs = pchnl->cio_reqs;
			channel_info->state = pchnl->state;
		} else {
			status = -EFAULT;
		}
	} else {
		status = -EFAULT;
	}
	return status;
}

/*
 * ======== bridge_chnl_get_ioc ========
 * Optionally wait for I/O completion on a channel. Dequeue an I/O
 * completion record, which contains information about the completed
 * I/O request.
 * Note: Ensures Channel Invariant (see notes above).
 */
int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
			struct chnl_ioc *chan_ioc)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj;
	int stat_sync;
	bool dequeue_ioc = true;
	struct chnl_ioc ioc = { NULL, 0, 0, 0, 0 };
	u8 *host_sys_buf = NULL;
	struct bridge_dev_context *dev_ctxt;
	struct dev_object *dev_obj;

	/* Check args: */
	if (!chan_ioc || !pchnl) {
		status = -EFAULT;
	} else if (timeout == CHNL_IOCNOWAIT) {
		if (list_empty(&pchnl->io_completions))
			status = -EREMOTEIO;
	}

	dev_obj = dev_get_first();
	dev_get_bridge_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		status = -EFAULT;

	if (status)
		goto func_end;

	ioc.status = CHNL_IOCSTATCOMPLETE;
	if (timeout != CHNL_IOCNOWAIT && list_empty(&pchnl->io_completions)) {
		if (timeout == CHNL_IOCINFINITE)
			timeout = SYNC_INFINITE;

		stat_sync = sync_wait_on_event(pchnl->sync_event, timeout);
		if (stat_sync == -ETIME) {
			/* No response from DSP */
			ioc.status |= CHNL_IOCSTATTIMEOUT;
			dequeue_ioc = false;
		} else if (stat_sync == -EPERM) {
			/* This can occur when the user mode thread is
			 * aborted (^C), or when _VWIN32_WaitSingleObject()
			 * fails due to unknown causes. */
			/* Even though Wait failed, there may be something in
			 * the Q: */
			if (list_empty(&pchnl->io_completions)) {
				ioc.status |= CHNL_IOCSTATCANCEL;
				dequeue_ioc = false;
			}
		}
	}
	/* See comment in AddIOReq */
	spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (dequeue_ioc) {
		/* Dequeue IOC and set chan_ioc; */
		DBC_ASSERT(!list_empty(&pchnl->io_completions));
		chnl_packet_obj = list_first_entry(&pchnl->io_completions,
						   struct chnl_irp, link);
		list_del(&chnl_packet_obj->link);
		/* Update chan_ioc from channel state and chirp: */
		pchnl->cio_cs--;
		/*
		 * If this is a zero-copy channel, then set IOC's pbuf
		 * to the DSP's address. This DSP address will get
		 * translated to user's virtual addr later.
		 */
		host_sys_buf = chnl_packet_obj->host_sys_buf;
		ioc.buf = chnl_packet_obj->host_user_buf;
		ioc.byte_size = chnl_packet_obj->byte_size;
		ioc.buf_size = chnl_packet_obj->buf_size;
		ioc.arg = chnl_packet_obj->arg;
		ioc.status |= chnl_packet_obj->status;
		/* Place the used chirp on the free list: */
		list_add_tail(&chnl_packet_obj->link,
			      &pchnl->free_packets_list);
	} else {
		ioc.buf = NULL;
		ioc.byte_size = 0;
		ioc.arg = 0;
		ioc.buf_size = 0;
	}
	/* Ensure invariant: If any IOC's are queued for this channel... */
	if (!list_empty(&pchnl->io_completions)) {
		/* Since DSPStream_Reclaim() does not take a timeout
		 * parameter, we pass the stream's timeout value to
		 * bridge_chnl_get_ioc. We cannot determine whether or not
		 * we have waited in user mode. Since the stream's timeout
		 * value may be non-zero, we still have to set the event.
		 * Therefore, this optimization is taken out.
		 *
		 * if (timeout == CHNL_IOCNOWAIT) {
		 *	... ensure event is set ...
		 *	sync_set_event(pchnl->sync_event);
		 * } */
		sync_set_event(pchnl->sync_event);
	} else {
		/* else, if list is empty, ensure event is reset. */
		sync_reset_event(pchnl->sync_event);
	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	if (dequeue_ioc
	    && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) {
		if (!(ioc.buf < (void *)USERMODE_ADDR))
			goto func_cont;

		/* If the addr is in user mode, then copy it */
		if (!host_sys_buf || !ioc.buf) {
			status = -EFAULT;
			goto func_cont;
		}
		if (!CHNL_IS_INPUT(pchnl->chnl_mode))
			goto func_cont1;

		/* host_user_buf */
		status = copy_to_user(ioc.buf, host_sys_buf, ioc.byte_size);
		if (status) {
			if (current->flags & PF_EXITING)
				status = 0;
		}
		if (status)
			status = -EFAULT;
func_cont1:
		kfree(host_sys_buf);
	}
func_cont:
	/* Update User's IOC block: */
	*chan_ioc = ioc;
func_end:
	return status;
}
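
/*
 * Polling sketch (illustrative only): with CHNL_IOCNOWAIT the call above
 * never blocks; -EREMOTEIO signals an empty completion queue, and cancelled
 * transfers are flagged in ioc.status. The chnl variable is assumed to come
 * from the caller:
 *
 *	struct chnl_ioc ioc;
 *	int err;
 *
 *	err = bridge_chnl_get_ioc(chnl, CHNL_IOCNOWAIT, &ioc);
 *	if (err == -EREMOTEIO)
 *		... nothing completed yet, try again later ...
 *	else if (!err && (ioc.status & CHNL_IOCSTATCANCEL))
 *		... transfer was cancelled, drop the buffer ...
 */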

/*
 * ======== bridge_chnl_get_mgr_info ========
 * Retrieve information related to the channel manager.
 */
int bridge_chnl_get_mgr_info(struct chnl_mgr *hchnl_mgr, u32 ch_id,
			     struct chnl_mgrinfo *mgr_info)
{
	struct chnl_mgr *chnl_mgr_obj = (struct chnl_mgr *)hchnl_mgr;

	if (!mgr_info || !hchnl_mgr)
		return -EFAULT;

	if (ch_id > CHNL_MAXCHANNELS)
		return -ECHRNG;

	/* Return the requested information: */
	mgr_info->chnl_obj = chnl_mgr_obj->channels[ch_id];
	mgr_info->open_channels = chnl_mgr_obj->open_channels;
	mgr_info->type = chnl_mgr_obj->type;
	/* total # of chnls */
	mgr_info->max_channels = chnl_mgr_obj->max_channels;

	return 0;
}

/*
 * ======== bridge_chnl_idle ========
 * Idles a particular channel.
 */
int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 timeout,
		     bool flush_data)
{
	s8 chnl_mode;
	struct chnl_mgr *chnl_mgr_obj;
	int status = 0;

	DBC_REQUIRE(chnl_obj);

	chnl_mode = chnl_obj->chnl_mode;
	chnl_mgr_obj = chnl_obj->chnl_mgr_obj;

	if (CHNL_IS_OUTPUT(chnl_mode) && !flush_data) {
		/* Wait for IO completions, up to the specified timeout: */
		status = bridge_chnl_flush_io(chnl_obj, timeout);
	} else {
		status = bridge_chnl_cancel_io(chnl_obj);

		/* Reset the byte count and put channel back in ready state. */
		chnl_obj->bytes_moved = 0;
		chnl_obj->state &= ~CHNL_STATECANCEL;
	}

	return status;
}

/*
 * ======== bridge_chnl_open ========
 * Open a new half-duplex channel to the DSP board.
 */
int bridge_chnl_open(struct chnl_object **chnl,
		     struct chnl_mgr *hchnl_mgr, s8 chnl_mode,
		     u32 ch_id, const struct chnl_attr *pattrs)
{
	int status = 0;
	struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
	struct chnl_object *pchnl = NULL;
	struct sync_object *sync_event = NULL;

	/* Ensure DBC requirements: */
	DBC_REQUIRE(chnl != NULL);
	DBC_REQUIRE(pattrs != NULL);
	DBC_REQUIRE(hchnl_mgr != NULL);
	*chnl = NULL;

	/* Validate Args: */
	if (!pattrs->uio_reqs)
		return -EINVAL;

	if (!hchnl_mgr)
		return -EFAULT;

	if (ch_id != CHNL_PICKFREE) {
		if (ch_id >= chnl_mgr_obj->max_channels)
			return -ECHRNG;
		if (chnl_mgr_obj->channels[ch_id] != NULL)
			return -EALREADY;
	} else {
		/* Check for free channel */
		status = search_free_channel(chnl_mgr_obj, &ch_id);
		if (status)
			return status;
	}

	DBC_ASSERT(ch_id < chnl_mgr_obj->max_channels);

	/* Create channel object: */
	pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL);
	if (!pchnl)
		return -ENOMEM;

	/* Protect queues from io_dpc: */
	pchnl->state = CHNL_STATECANCEL;

	/* Initialize the IOR and IOC queues before the first error path,
	 * so that out_err can free them unconditionally: */
	INIT_LIST_HEAD(&pchnl->io_requests);
	INIT_LIST_HEAD(&pchnl->io_completions);

	/* Allocate initial IOR and IOC queues: */
	status = create_chirp_list(&pchnl->free_packets_list,
				   pattrs->uio_reqs);
	if (status)
		goto out_err;

	pchnl->chnl_packets = pattrs->uio_reqs;
	pchnl->cio_cs = 0;
	pchnl->cio_reqs = 0;

	sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (!sync_event) {
		status = -ENOMEM;
		goto out_err;
	}
	sync_init_event(sync_event);

	pchnl->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
	if (!pchnl->ntfy_obj) {
		status = -ENOMEM;
		goto out_err;
	}
	ntfy_init(pchnl->ntfy_obj);

	/* Initialize CHNL object fields: */
	pchnl->chnl_mgr_obj = chnl_mgr_obj;
	pchnl->chnl_id = ch_id;
	pchnl->chnl_mode = chnl_mode;
	pchnl->user_event = sync_event;
	pchnl->sync_event = sync_event;
	/* Get the process handle */
	pchnl->process = current->tgid;
	pchnl->cb_arg = 0;
	pchnl->bytes_moved = 0;
	/* Default to proc-copy */
	pchnl->chnl_type = CHNL_PCPY;

	/* Insert channel object in channel manager: */
	chnl_mgr_obj->channels[pchnl->chnl_id] = pchnl;
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	chnl_mgr_obj->open_channels++;
	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	/* Return result... */
	pchnl->state = CHNL_STATEREADY;
	*chnl = pchnl;

	return status;

out_err:
	/* Free memory */
	free_chirp_list(&pchnl->io_completions);
	free_chirp_list(&pchnl->io_requests);
	free_chirp_list(&pchnl->free_packets_list);

	kfree(sync_event);

	if (pchnl->ntfy_obj) {
		ntfy_delete(pchnl->ntfy_obj);
		kfree(pchnl->ntfy_obj);
		pchnl->ntfy_obj = NULL;
	}
	kfree(pchnl);

	return status;
}
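
/*
 * Lifecycle sketch (illustrative only): open a half-duplex channel on a
 * free slot, transfer data, then tear it down. CHNL_MODETODSP is assumed
 * from the channel mode conventions declared alongside chnl_sm.h; mgr and
 * pattrs are assumed to come from the caller:
 *
 *	struct chnl_object *chnl;
 *	int err;
 *
 *	err = bridge_chnl_open(&chnl, mgr, CHNL_MODETODSP, CHNL_PICKFREE,
 *			       pattrs);
 *	if (!err) {
 *		... bridge_chnl_add_io_req() / bridge_chnl_get_ioc() ...
 *		bridge_chnl_close(chnl);
 *	}
 */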

/*
 * ======== bridge_chnl_register_notify ========
 * Registers for events on a particular channel.
 */
int bridge_chnl_register_notify(struct chnl_object *chnl_obj,
				u32 event_mask, u32 notify_type,
				struct dsp_notification *hnotification)
{
	int status = 0;

	DBC_ASSERT(!(event_mask & ~(DSP_STREAMDONE | DSP_STREAMIOCOMPLETION)));

	if (event_mask)
		status = ntfy_register(chnl_obj->ntfy_obj, hnotification,
				       event_mask, notify_type);
	else
		status = ntfy_unregister(chnl_obj->ntfy_obj, hnotification);

	return status;
}

/*
 * ======== create_chirp_list ========
 * Purpose:
 *     Initialize a queue of channel I/O Request/Completion packets
 *     (CHIRPs).
 * Parameters:
 *     list:    Pointer to a list_head
 *     chirps:  Number of chirps to allocate.
 * Returns:
 *     0 if successful, error code otherwise.
 * Requires:
 * Ensures:
 */
static int create_chirp_list(struct list_head *list, u32 chirps)
{
	struct chnl_irp *chirp;
	u32 i;

	INIT_LIST_HEAD(list);

	/* Make N chirps and place on queue. */
	for (i = 0; i < chirps; i++) {
		chirp = kzalloc(sizeof(struct chnl_irp), GFP_KERNEL);
		if (!chirp)
			break;
		list_add_tail(&chirp->link, list);
	}

	/* If we couldn't allocate all chirps, free those allocated: */
	if (i != chirps) {
		free_chirp_list(list);
		return -ENOMEM;
	}

	return 0;
}

/*
 * ======== free_chirp_list ========
 * Purpose:
 *     Free the queue of chirps.
 */
static void free_chirp_list(struct list_head *chirp_list)
{
	struct chnl_irp *chirp, *tmp;

	DBC_REQUIRE(chirp_list != NULL);

	list_for_each_entry_safe(chirp, tmp, chirp_list, link) {
		list_del(&chirp->link);
		kfree(chirp);
	}
}

/*
 * ======== search_free_channel ========
 * Search for a free channel slot in the array of channel pointers.
 */
static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
			       u32 *chnl)
{
	int status = -ENOSR;
	u32 i;

	DBC_REQUIRE(chnl_mgr_obj);

	for (i = 0; i < chnl_mgr_obj->max_channels; i++) {
		if (chnl_mgr_obj->channels[i] == NULL) {
			status = 0;
			*chnl = i;
			break;
		}
	}

	return status;
}