/*
 * chnl_sm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * Implements upper edge functions for Bridge driver channel module.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

/*
 *      The lower edge functions must be implemented by the Bridge driver
 *      writer, and are declared in chnl_sm.h.
 *
 *      Care is taken in this code to prevent simultaneous access to channel
 *      queues from
 *      1. Threads.
 *      2. io_dpc(), scheduled from the io_isr() as an event.
 *
 *      This is done primarily by:
 *      - Semaphores;
 *      - state flags in the channel object; and
 *      - ensuring the IO_Dispatch() routine, which is called from both
 *        CHNL_AddIOReq() and the DPC (if implemented), is not re-entered.
 *
 *  Channel Invariant:
 *      There is an important invariant condition which must be maintained per
 *      channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation
 *      of which may cause timeouts and/or failure of the sync_wait_on_event()
 *      function.
 *      This invariant condition is:
 *
 *          list_empty(&pchnl->io_completions) ==> pchnl->sync_event is reset
 *      and
 *          !list_empty(&pchnl->io_completions) ==> pchnl->sync_event is set.
 */
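
/*
 * A minimal sketch of how that invariant is maintained in practice. It
 * mirrors the tail of bridge_chnl_get_ioc() below and is purely
 * illustrative; it is not an additional entry point of this driver.
 */
#if 0
	spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	if (!list_empty(&pchnl->io_completions))
		sync_set_event(pchnl->sync_event);	/* non-empty ==> set */
	else
		sync_reset_event(pchnl->sync_event);	/* empty ==> reset */
	spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
#endif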

#include <linux/types.h>

/*  ----------------------------------- OS */
#include <dspbridge/host_os.h>

/*  ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/*  ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>

/*  ----------------------------------- Bridge Driver */
#include <dspbridge/dspdefs.h>
#include <dspbridge/dspchnl.h>
#include "_tiomap.h"

/*  ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>

/*  ----------------------------------- Others */
#include <dspbridge/io_sm.h>

/*  ----------------------------------- Define for This */
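/*
 * Host buffers at addresses below PAGE_OFFSET are taken to be user-mode
 * pointers and are bounced through a kernel-space copy (see
 * bridge_chnl_add_io_req() and bridge_chnl_get_ioc() below).
 */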
#define USERMODE_ADDR   PAGE_OFFSET

#define MAILBOX_IRQ INT_MAIL_MPU_IRQ

/*  ----------------------------------- Function Prototypes */
static int create_chirp_list(struct list_head *list, u32 chirps);

static void free_chirp_list(struct list_head *list);

static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
				      u32 *chnl);

/*
 *  ======== bridge_chnl_add_io_req ========
 *      Enqueue an I/O request for data transfer on a channel to the DSP.
 *      The direction (mode) is specified in the channel object. Note the DSP
 *      address is specified for channels opened in direct I/O mode.
 */
int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *host_buf,
			       u32 byte_size, u32 buf_size,
			       u32 dw_dsp_addr, u32 dw_arg)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj = NULL;
	struct bridge_dev_context *dev_ctxt;
	struct dev_object *dev_obj;
	u8 dw_state;
	bool is_eos;
	struct chnl_mgr *chnl_mgr_obj = pchnl->chnl_mgr_obj;
	u8 *host_sys_buf = NULL;
	bool sched_dpc = false;
	u16 mb_val = 0;

	is_eos = (byte_size == 0);

	/* Validate args */
	if (!host_buf || !pchnl)
		return -EFAULT;

	if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode))
		return -EPERM;

	/*
	 * Check the channel state: only queue chirp if channel state
	 * allows it.
	 */
	dw_state = pchnl->state;
	if (dw_state != CHNL_STATEREADY) {
		if (dw_state & CHNL_STATECANCEL)
			return -ECANCELED;
		if ((dw_state & CHNL_STATEEOS) &&
				CHNL_IS_OUTPUT(pchnl->chnl_mode))
			return -EPIPE;
		/* No other possible states left */
	}

	dev_obj = dev_get_first();
	dev_get_bridge_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		return -EFAULT;

	if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && host_buf) {
		if (!(host_buf < (void *)USERMODE_ADDR)) {
			host_sys_buf = host_buf;
			goto func_cont;
		}
		/* If addr is in user mode, then copy it to kernel space */
		host_sys_buf = kmalloc(buf_size, GFP_KERNEL);
		if (host_sys_buf == NULL)
			return -ENOMEM;

		if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			if (copy_from_user(host_sys_buf, host_buf,
						buf_size)) {
				kfree(host_sys_buf);
				host_sys_buf = NULL;
				return -EFAULT;
			}
		}
	}
func_cont:
	/* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY
	 * channels. DPCCS is held to avoid race conditions with PCPY channels.
	 * If DPC is scheduled in process context (iosm_schedule) and any
	 * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
	 * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (pchnl->chnl_type == CHNL_PCPY) {
		/* This is a processor-copy channel. */
		if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			/* Check buffer size on output channels for fit. */
			if (byte_size > io_buf_size(
						pchnl->chnl_mgr_obj->iomgr)) {
				status = -EINVAL;
				goto out;
			}
		}
	}

	/* Get a free chirp: */
	if (list_empty(&pchnl->free_packets_list)) {
		status = -EIO;
		goto out;
	}
	chnl_packet_obj = list_first_entry(&pchnl->free_packets_list,
			struct chnl_irp, link);
	list_del(&chnl_packet_obj->link);

	/* Enqueue the chirp on the chnl's IORequest queue: */
	chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
		host_buf;
	if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)
		chnl_packet_obj->host_sys_buf = host_sys_buf;

	/*
	 * Note: for dma chans dw_dsp_addr contains dsp address
	 * of SM buffer.
	 */
	/* DSP address */
	chnl_packet_obj->dsp_tx_addr = dw_dsp_addr / chnl_mgr_obj->word_size;
	chnl_packet_obj->byte_size = byte_size;
	chnl_packet_obj->buf_size = buf_size;
	/* Only valid for output channel */
	chnl_packet_obj->arg = dw_arg;
	chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS :
			CHNL_IOCSTATCOMPLETE);
	list_add_tail(&chnl_packet_obj->link, &pchnl->io_requests);
	pchnl->cio_reqs++;
	/*
	 * If end of stream, update the channel state to prevent
	 * more IOR's.
	 */
	if (is_eos)
		pchnl->state |= CHNL_STATEEOS;

	/* Request IO from the DSP */
	io_request_chnl(chnl_mgr_obj->iomgr, pchnl,
			(CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
			 IO_OUTPUT), &mb_val);
	sched_dpc = true;
out:
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
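	/*
	 * io_request_chnl() may have handed back a mailbox value; signal
	 * the DSP only now that the mailbox IRQ is re-enabled and the
	 * manager lock has been dropped.
	 */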
	if (mb_val != 0)
		sm_interrupt_dsp(dev_ctxt, mb_val);

	/* Schedule a DPC, to do the actual data transfer */
	if (sched_dpc)
		iosm_schedule(chnl_mgr_obj->iomgr);

	return status;
}

/*
 *  ======== bridge_chnl_cancel_io ========
 *      Return all I/O requests to the client which have not yet been
 *      transferred.  The channel's I/O completion object is
 *      signalled, and all the I/O requests are queued as IOC's, with the
 *      status field set to CHNL_IOCSTATCANCEL.
 *      This call is typically used in abort situations, and is a prelude to
 *      chnl_close().
 */
int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
{
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	u32 chnl_id = -1;
	s8 chnl_mode;
	struct chnl_irp *chirp, *tmp;
	struct chnl_mgr *chnl_mgr_obj = NULL;

	/* Check args: */
	if (!pchnl || !pchnl->chnl_mgr_obj)
		return -EFAULT;

	chnl_id = pchnl->chnl_id;
	chnl_mode = pchnl->chnl_mode;
	chnl_mgr_obj = pchnl->chnl_mgr_obj;

	/*  Mark this channel as cancelled, to prevent further IORequests
	 *  or dispatching. */
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);

	pchnl->state |= CHNL_STATECANCEL;

	if (list_empty(&pchnl->io_requests)) {
		spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
		return 0;
	}

	if (pchnl->chnl_type == CHNL_PCPY) {
		/* Indicate we have no more buffers available for transfer: */
		if (CHNL_IS_INPUT(pchnl->chnl_mode)) {
			io_cancel_chnl(chnl_mgr_obj->iomgr, chnl_id);
		} else {
			/* Record that we no longer have output buffers
			 * available: */
			chnl_mgr_obj->output_mask &= ~(1 << chnl_id);
		}
	}
	/* Move all IOR's to IOC queue: */
	list_for_each_entry_safe(chirp, tmp, &pchnl->io_requests, link) {
		list_del(&chirp->link);
		chirp->byte_size = 0;
		chirp->status |= CHNL_IOCSTATCANCEL;
		list_add_tail(&chirp->link, &pchnl->io_completions);
		pchnl->cio_cs++;
		pchnl->cio_reqs--;
	}

	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);

	return 0;
}

/*
 *  ======== bridge_chnl_close ========
 *  Purpose:
 *      Ensures all pending I/O on this channel is cancelled, discards all
 *      queued I/O completion notifications, then frees the resources allocated
 *      for this channel, and makes the corresponding logical channel id
 *      available for subsequent use.
 */
int bridge_chnl_close(struct chnl_object *chnl_obj)
{
	int status;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;

	/* Check args: */
	if (!pchnl)
		return -EFAULT;
	/* Cancel IO: this ensures no further IO requests or notifications */
	status = bridge_chnl_cancel_io(chnl_obj);
	if (status)
		return status;
	/* Invalidate channel object: Protects from CHNL_GetIOCompletion() */
	/* Free the slot in the channel manager: */
	pchnl->chnl_mgr_obj->channels[pchnl->chnl_id] = NULL;
	spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	pchnl->chnl_mgr_obj->open_channels -= 1;
	spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	if (pchnl->ntfy_obj) {
		ntfy_delete(pchnl->ntfy_obj);
		kfree(pchnl->ntfy_obj);
		pchnl->ntfy_obj = NULL;
	}
	/* Reset channel event: (NOTE: user_event freed in user context) */
	if (pchnl->sync_event) {
		sync_reset_event(pchnl->sync_event);
		kfree(pchnl->sync_event);
		pchnl->sync_event = NULL;
	}
	/* Free I/O request and I/O completion queues: */
	free_chirp_list(&pchnl->io_completions);
	pchnl->cio_cs = 0;

	free_chirp_list(&pchnl->io_requests);
	pchnl->cio_reqs = 0;

	free_chirp_list(&pchnl->free_packets_list);

	/* Release channel object. */
	kfree(pchnl);

	return status;
}

/*
 *  ======== bridge_chnl_create ========
 *      Create a channel manager object, responsible for opening new channels
 *      and closing old ones for a given board.
 */
int bridge_chnl_create(struct chnl_mgr **channel_mgr,
			      struct dev_object *hdev_obj,
			      const struct chnl_mgrattrs *mgr_attrts)
{
	int status = 0;
	struct chnl_mgr *chnl_mgr_obj = NULL;
	u8 max_channels;

	/* Allocate channel manager object */
	chnl_mgr_obj = kzalloc(sizeof(struct chnl_mgr), GFP_KERNEL);
	if (chnl_mgr_obj) {
		/*
		 * The max_channels attr must equal the # of supported chnls
		 * for each transport (# chnls for PCPY = DDMA = ZCPY): i.e.
		 *      mgr_attrts->max_channels = CHNL_MAXCHANNELS =
		 *                       DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS.
		 */
		max_channels = CHNL_MAXCHANNELS + CHNL_MAXCHANNELS * CHNL_PCPY;
		/* Create array of channels */
		chnl_mgr_obj->channels = kzalloc(sizeof(struct chnl_object *)
						* max_channels, GFP_KERNEL);
		if (chnl_mgr_obj->channels) {
			/* Initialize chnl_mgr object */
			chnl_mgr_obj->type = CHNL_TYPESM;
			chnl_mgr_obj->word_size = mgr_attrts->word_size;
			/* Total # chnls supported */
			chnl_mgr_obj->max_channels = max_channels;
			chnl_mgr_obj->open_channels = 0;
			chnl_mgr_obj->output_mask = 0;
			chnl_mgr_obj->last_output = 0;
			chnl_mgr_obj->dev_obj = hdev_obj;
			spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock);
		} else {
			status = -ENOMEM;
		}
	} else {
		status = -ENOMEM;
	}

	if (status) {
		bridge_chnl_destroy(chnl_mgr_obj);
		*channel_mgr = NULL;
	} else {
		/* Return channel manager object to caller... */
		*channel_mgr = chnl_mgr_obj;
	}
	return status;
}
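
/*
 * A minimal, hypothetical end-to-end usage sketch of this module as seen
 * from the upper edge. The attribute values, CHNL_MODETODSP, and the
 * hdev_obj/buf/len variables are assumed (the constants live in the
 * dspbridge headers, not in this file), and error handling is elided:
 */
#if 0
	struct chnl_mgr *mgr;
	struct chnl_object *chnl;
	struct chnl_mgrattrs mgr_attrs = { .max_channels = CHNL_MAXCHANNELS,
					   .word_size = 2 };	/* assumed */
	struct chnl_attr attrs = { .uio_reqs = 16 };		/* assumed */
	struct chnl_ioc ioc;

	bridge_chnl_create(&mgr, hdev_obj, &mgr_attrs);
	bridge_chnl_open(&chnl, mgr, CHNL_MODETODSP, CHNL_PICKFREE, &attrs);
	bridge_chnl_add_io_req(chnl, buf, len, len, 0, 0);
	bridge_chnl_get_ioc(chnl, CHNL_IOCINFINITE, &ioc);
	bridge_chnl_close(chnl);
	bridge_chnl_destroy(mgr);
#endif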

/*
 *  ======== bridge_chnl_destroy ========
 *  Purpose:
 *      Close all open channels, and destroy the channel manager.
 */
int bridge_chnl_destroy(struct chnl_mgr *hchnl_mgr)
{
	int status = 0;
	struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
	u32 chnl_id;

	if (hchnl_mgr) {
		/* Close all open channels: */
		for (chnl_id = 0; chnl_id < chnl_mgr_obj->max_channels;
		     chnl_id++) {
			status =
			    bridge_chnl_close(chnl_mgr_obj->channels
					      [chnl_id]);
			if (status)
				dev_dbg(bridge, "%s: Error status 0x%x\n",
					__func__, status);
		}

		/* Free channel manager object: */
		kfree(chnl_mgr_obj->channels);

		/* Set hchnl_mgr to NULL in device object. */
		dev_set_chnl_mgr(chnl_mgr_obj->dev_obj, NULL);
		/* Free this Chnl Mgr object: */
		kfree(hchnl_mgr);
	} else {
		status = -EFAULT;
	}
	return status;
}

/*
 *  ======== bridge_chnl_flush_io ========
 *  Purpose:
 *      Flushes all the outstanding data requests on a channel.
 */
int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	s8 chnl_mode = -1;
	struct chnl_mgr *chnl_mgr_obj;
	struct chnl_ioc chnl_ioc_obj;

	/* Check args: */
	if (pchnl) {
		if ((timeout == CHNL_IOCNOWAIT)
		    && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			status = -EINVAL;
		} else {
			chnl_mode = pchnl->chnl_mode;
			chnl_mgr_obj = pchnl->chnl_mgr_obj;
		}
	} else {
		status = -EFAULT;
	}
	if (!status) {
		/* Note: Currently, if another thread continues to add IO
		 * requests to this channel, this function will continue to
		 * flush all such queued IO requests. */
		if (CHNL_IS_OUTPUT(chnl_mode)
		    && (pchnl->chnl_type == CHNL_PCPY)) {
			/* Wait for IO completions, up to the specified
			 * timeout: */
			while (!list_empty(&pchnl->io_requests) && !status) {
				status = bridge_chnl_get_ioc(chnl_obj,
						timeout, &chnl_ioc_obj);
				if (status)
					continue;

				if (chnl_ioc_obj.status & CHNL_IOCSTATTIMEOUT)
					status = -ETIMEDOUT;
			}
		} else {
			status = bridge_chnl_cancel_io(chnl_obj);
			/* Now, leave the channel in the ready state: */
			pchnl->state &= ~CHNL_STATECANCEL;
		}
	}
	return status;
}

/*
 *  ======== bridge_chnl_get_info ========
 *  Purpose:
 *      Retrieve information related to a channel.
 */
int bridge_chnl_get_info(struct chnl_object *chnl_obj,
			     struct chnl_info *channel_info)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;

	if (channel_info != NULL) {
		if (pchnl) {
			/* Return the requested information: */
			channel_info->chnl_mgr = pchnl->chnl_mgr_obj;
			channel_info->event_obj = pchnl->user_event;
			channel_info->cnhl_id = pchnl->chnl_id;
			channel_info->mode = pchnl->chnl_mode;
			channel_info->bytes_tx = pchnl->bytes_moved;
			channel_info->process = pchnl->process;
			channel_info->sync_event = pchnl->sync_event;
			channel_info->cio_cs = pchnl->cio_cs;
			channel_info->cio_reqs = pchnl->cio_reqs;
			channel_info->state = pchnl->state;
		} else {
			status = -EFAULT;
		}
	} else {
		status = -EFAULT;
	}
	return status;
}

/*
 *  ======== bridge_chnl_get_ioc ========
 *      Optionally wait for I/O completion on a channel.  Dequeue an I/O
 *      completion record, which contains information about the completed
 *      I/O request.
 *      Note: Ensures Channel Invariant (see notes above).
 */
int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
			    struct chnl_ioc *chan_ioc)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj;
	int stat_sync;
	bool dequeue_ioc = true;
	struct chnl_ioc ioc = { NULL, 0, 0, 0, 0 };
	u8 *host_sys_buf = NULL;
	struct bridge_dev_context *dev_ctxt;
	struct dev_object *dev_obj;

	/* Check args: */
	if (!chan_ioc || !pchnl) {
		status = -EFAULT;
	} else if (timeout == CHNL_IOCNOWAIT) {
		if (list_empty(&pchnl->io_completions))
			status = -EREMOTEIO;
	}

	dev_obj = dev_get_first();
	dev_get_bridge_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		status = -EFAULT;

	if (status)
		goto func_end;

	ioc.status = CHNL_IOCSTATCOMPLETE;
	if (timeout != CHNL_IOCNOWAIT &&
	    list_empty(&pchnl->io_completions)) {
		if (timeout == CHNL_IOCINFINITE)
			timeout = SYNC_INFINITE;

		stat_sync = sync_wait_on_event(pchnl->sync_event, timeout);
		if (stat_sync == -ETIME) {
			/* No response from DSP */
			ioc.status |= CHNL_IOCSTATTIMEOUT;
			dequeue_ioc = false;
		} else if (stat_sync == -EPERM) {
			/* This can occur when the user mode thread is
			 * aborted (^C), or when _VWIN32_WaitSingleObject()
			 * fails due to unknown causes. */
			/* Even though Wait failed, there may be something in
			 * the Q: */
			if (list_empty(&pchnl->io_completions)) {
				ioc.status |= CHNL_IOCSTATCANCEL;
				dequeue_ioc = false;
			}
		}
	}
	/* See comment in AddIOReq */
	spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (dequeue_ioc) {
		/* Dequeue IOC and set chan_ioc; */
		chnl_packet_obj = list_first_entry(&pchnl->io_completions,
				struct chnl_irp, link);
		list_del(&chnl_packet_obj->link);
		/* Update chan_ioc from channel state and chirp: */
		pchnl->cio_cs--;
		/*
		 * If this is a zero-copy channel, then set IOC's pbuf
		 * to the DSP's address. This DSP address will get
		 * translated to user's virtual addr later.
		 */
		host_sys_buf = chnl_packet_obj->host_sys_buf;
		ioc.buf = chnl_packet_obj->host_user_buf;
		ioc.byte_size = chnl_packet_obj->byte_size;
		ioc.buf_size = chnl_packet_obj->buf_size;
		ioc.arg = chnl_packet_obj->arg;
		ioc.status |= chnl_packet_obj->status;
		/* Place the used chirp on the free list: */
		list_add_tail(&chnl_packet_obj->link,
				&pchnl->free_packets_list);
	} else {
		ioc.buf = NULL;
		ioc.byte_size = 0;
		ioc.arg = 0;
		ioc.buf_size = 0;
	}
	/* Ensure invariant: If any IOC's are queued for this channel... */
	if (!list_empty(&pchnl->io_completions)) {
		/*  Since DSPStream_Reclaim() does not take a timeout
		 *  parameter, we pass the stream's timeout value to
		 *  bridge_chnl_get_ioc. We cannot determine whether or not
		 *  we have waited in User mode. Since the stream's timeout
		 *  value may be non-zero, we still have to set the event.
		 *  Therefore, this optimization is taken out.
		 *
		 *  if (timeout == CHNL_IOCNOWAIT) {
		 *    ... ensure event is set..
		 *      sync_set_event(pchnl->sync_event);
		 *  } */
		sync_set_event(pchnl->sync_event);
	} else {
		/* else, if list is empty, ensure event is reset. */
		sync_reset_event(pchnl->sync_event);
	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	if (dequeue_ioc
	    && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) {
		if (!(ioc.buf < (void *)USERMODE_ADDR))
			goto func_cont;

		/* If the addr is in user mode, then copy it */
		if (!host_sys_buf || !ioc.buf) {
			status = -EFAULT;
			goto func_cont;
		}
		if (!CHNL_IS_INPUT(pchnl->chnl_mode))
			goto func_cont1;

		/* Copy the kernel bounce buffer back to the user's buffer
		 * (host_user_buf). A fault is ignored if the task is
		 * already exiting. */
		if (copy_to_user(ioc.buf, host_sys_buf, ioc.byte_size) &&
		    !(current->flags & PF_EXITING))
			status = -EFAULT;
func_cont1:
		kfree(host_sys_buf);
	}
func_cont:
	/* Update User's IOC block: */
	*chan_ioc = ioc;
func_end:
	return status;
}
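
/*
 * A hypothetical reclaim loop built on the call above; consume() stands in
 * for whatever the caller does with a completed buffer:
 */
#if 0
	struct chnl_ioc ioc;

	while (!bridge_chnl_get_ioc(chnl, CHNL_IOCINFINITE, &ioc)) {
		if (ioc.status & (CHNL_IOCSTATCANCEL | CHNL_IOCSTATTIMEOUT))
			break;
		consume(ioc.buf, ioc.byte_size);	/* assumed helper */
	}
#endif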

/*
 *  ======== bridge_chnl_get_mgr_info ========
 *      Retrieve information related to the channel manager.
 */
int bridge_chnl_get_mgr_info(struct chnl_mgr *hchnl_mgr, u32 ch_id,
				 struct chnl_mgrinfo *mgr_info)
{
	struct chnl_mgr *chnl_mgr_obj = (struct chnl_mgr *)hchnl_mgr;

	if (!mgr_info || !hchnl_mgr)
		return -EFAULT;

	if (ch_id >= CHNL_MAXCHANNELS)
		return -ECHRNG;

	/* Return the requested information: */
	mgr_info->chnl_obj = chnl_mgr_obj->channels[ch_id];
	mgr_info->open_channels = chnl_mgr_obj->open_channels;
	mgr_info->type = chnl_mgr_obj->type;
	/* Total # of chnls */
	mgr_info->max_channels = chnl_mgr_obj->max_channels;

	return 0;
}

/*
 *  ======== bridge_chnl_idle ========
 *      Idles a particular channel.
 */
int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 timeout,
			    bool flush_data)
{
	s8 chnl_mode;
	struct chnl_mgr *chnl_mgr_obj;
	int status = 0;

	chnl_mode = chnl_obj->chnl_mode;
	chnl_mgr_obj = chnl_obj->chnl_mgr_obj;

	if (CHNL_IS_OUTPUT(chnl_mode) && !flush_data) {
		/* Wait for IO completions, up to the specified timeout: */
		status = bridge_chnl_flush_io(chnl_obj, timeout);
	} else {
		status = bridge_chnl_cancel_io(chnl_obj);

		/* Reset the byte count and put channel back in ready state. */
		chnl_obj->bytes_moved = 0;
		chnl_obj->state &= ~CHNL_STATECANCEL;
	}

	return status;
}

/*
 *  ======== bridge_chnl_open ========
 *      Open a new half-duplex channel to the DSP board.
 */
int bridge_chnl_open(struct chnl_object **chnl,
			    struct chnl_mgr *hchnl_mgr, s8 chnl_mode,
			    u32 ch_id, const struct chnl_attr *pattrs)
{
	int status = 0;
	struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
	struct chnl_object *pchnl = NULL;
	struct sync_object *sync_event = NULL;

	*chnl = NULL;

	/* Validate Args: */
	if (!pattrs->uio_reqs)
		return -EINVAL;

	if (!hchnl_mgr)
		return -EFAULT;

	if (ch_id != CHNL_PICKFREE) {
		if (ch_id >= chnl_mgr_obj->max_channels)
			return -ECHRNG;
		if (chnl_mgr_obj->channels[ch_id] != NULL)
			return -EALREADY;
	} else {
		/* Check for free channel */
		status = search_free_channel(chnl_mgr_obj, &ch_id);
		if (status)
			return status;
	}

	/* Create channel object: */
	pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL);
	if (!pchnl)
		return -ENOMEM;

	/* Protect queues from io_dpc: */
	pchnl->state = CHNL_STATECANCEL;

	INIT_LIST_HEAD(&pchnl->io_requests);
	INIT_LIST_HEAD(&pchnl->io_completions);

	/* Allocate initial IOR and IOC queues. The request/completion list
	 * heads are initialized first so that the error path can safely
	 * walk all three lists. */
	status = create_chirp_list(&pchnl->free_packets_list,
			pattrs->uio_reqs);
	if (status)
		goto out_err;

	pchnl->chnl_packets = pattrs->uio_reqs;
	pchnl->cio_cs = 0;
	pchnl->cio_reqs = 0;

	sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (!sync_event) {
		status = -ENOMEM;
		goto out_err;
	}
	sync_init_event(sync_event);

	pchnl->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
	if (!pchnl->ntfy_obj) {
		status = -ENOMEM;
		goto out_err;
	}
	ntfy_init(pchnl->ntfy_obj);

	/* Initialize CHNL object fields: */
	pchnl->chnl_mgr_obj = chnl_mgr_obj;
	pchnl->chnl_id = ch_id;
	pchnl->chnl_mode = chnl_mode;
	pchnl->user_event = sync_event;
	pchnl->sync_event = sync_event;
	/* Get the process handle */
	pchnl->process = current->tgid;
	pchnl->cb_arg = 0;
	pchnl->bytes_moved = 0;
	/* Default to proc-copy */
	pchnl->chnl_type = CHNL_PCPY;

	/* Insert channel object in channel manager: */
	chnl_mgr_obj->channels[pchnl->chnl_id] = pchnl;
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	chnl_mgr_obj->open_channels++;
	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	/* Return result... */
	pchnl->state = CHNL_STATEREADY;
	*chnl = pchnl;

	return status;

out_err:
	/* Free memory */
	free_chirp_list(&pchnl->io_completions);
	free_chirp_list(&pchnl->io_requests);
	free_chirp_list(&pchnl->free_packets_list);

	kfree(sync_event);

	if (pchnl->ntfy_obj) {
		ntfy_delete(pchnl->ntfy_obj);
		kfree(pchnl->ntfy_obj);
		pchnl->ntfy_obj = NULL;
	}
	kfree(pchnl);

	return status;
}

/*
 *  ======== bridge_chnl_register_notify ========
 *      Registers for events on a particular channel.
 */
int bridge_chnl_register_notify(struct chnl_object *chnl_obj,
				    u32 event_mask, u32 notify_type,
				    struct dsp_notification *hnotification)
{
	int status = 0;

	if (event_mask)
		status = ntfy_register(chnl_obj->ntfy_obj, hnotification,
						event_mask, notify_type);
	else
		status = ntfy_unregister(chnl_obj->ntfy_obj, hnotification);

	return status;
}
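
/*
 * A hypothetical registration call. DSP_STREAMIOCOMPLETION and
 * DSP_SIGNALEVENT are assumed to come from the dspbridge headers, and
 * notification is a caller-supplied dsp_notification:
 */
#if 0
	bridge_chnl_register_notify(chnl, DSP_STREAMIOCOMPLETION,
				    DSP_SIGNALEVENT, notification);
#endif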

/*
 *  ======== create_chirp_list ========
 *  Purpose:
 *      Initialize a queue of channel I/O Request/Completion packets.
 *  Parameters:
 *      list:       Pointer to a list_head
 *      chirps:     Number of Chirps to allocate.
 *  Returns:
 *      0 if successful, error code otherwise.
 */
static int create_chirp_list(struct list_head *list, u32 chirps)
{
	struct chnl_irp *chirp;
	u32 i;

	INIT_LIST_HEAD(list);

	/* Make N chirps and place on queue. */
	for (i = 0; i < chirps; i++) {
		chirp = kzalloc(sizeof(struct chnl_irp), GFP_KERNEL);
		if (!chirp)
			break;
		list_add_tail(&chirp->link, list);
	}

	/* If we couldn't allocate all chirps, free those allocated: */
	if (i != chirps) {
		free_chirp_list(list);
		return -ENOMEM;
	}

	return 0;
}

/*
 *  ======== free_chirp_list ========
 *  Purpose:
 *      Free the queue of Chirps.
 */
static void free_chirp_list(struct list_head *chirp_list)
{
	struct chnl_irp *chirp, *tmp;

	list_for_each_entry_safe(chirp, tmp, chirp_list, link) {
		list_del(&chirp->link);
		kfree(chirp);
	}
}

/*
 *  ======== search_free_channel ========
 *      Search for a free channel slot in the array of channel pointers.
 */
static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
				      u32 *chnl)
{
	int status = -ENOSR;
	u32 i;

	for (i = 0; i < chnl_mgr_obj->max_channels; i++) {
		if (chnl_mgr_obj->channels[i] == NULL) {
			status = 0;
			*chnl = i;
			break;
		}
	}

	return status;
}