/*
 * drv.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * DSP/BIOS Bridge resource allocation module.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>
#include <linux/list.h>

/*  ----------------------------------- Host OS */
#include <dspbridge/host_os.h>

/*  ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/*  ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/*  ----------------------------------- This */
#include <dspbridge/drv.h>
#include <dspbridge/dev.h>

#include <dspbridge/node.h>
#include <dspbridge/proc.h>
#include <dspbridge/strm.h>
#include <dspbridge/nodepriv.h>
#include <dspbridge/dspchnl.h>
#include <dspbridge/resourcecleanup.h>

/*  ----------------------------------- Defines, Data Structures, Typedefs */
struct drv_object {
	struct list_head dev_list;
	struct list_head dev_node_string;
};

/*
 *  This is the Device Extension. Named with the prefix
 *  DRV_ since it lives in this module.
 */
struct drv_ext {
	struct list_head link;
	char sz_string[MAXREGPATHLENGTH];
};

/*  ----------------------------------- Globals */
static s32 refs;
static bool ext_phys_mem_pool_enabled;
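
/*
 * Optional bump allocator over a platform-provided, physically contiguous
 * region: next_phys_alloc_ptr only ever moves forward, and blocks handed
 * out from this pool are never returned to it.
 */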
struct ext_phys_mem_pool {
	u32 phys_mem_base;
	u32 phys_mem_size;
	u32 virt_mem_base;
	u32 next_phys_alloc_ptr;
};
static struct ext_phys_mem_pool ext_mem_pool;

/*  ----------------------------------- Function Prototypes */
static int request_bridge_resources(struct cfg_hostres *res);


/* GPP PROCESS CLEANUP CODE */

static int drv_proc_free_node_res(int id, void *p, void *data);

/* Allocate and add a node resource element.
 * This function is called from node_allocate(). */
int drv_insert_node_res_element(void *hnode, void *node_resource,
				       void *process_ctxt)
{
	struct node_res_object **node_res_obj =
	    (struct node_res_object **)node_resource;
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	int retval;

	*node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL);
	if (!*node_res_obj) {
		status = -ENOMEM;
		goto func_end;
	}

	(*node_res_obj)->node = hnode;
	retval = idr_get_new(ctxt->node_id, *node_res_obj,
						&(*node_res_obj)->id);
	if (retval == -EAGAIN) {
		if (!idr_pre_get(ctxt->node_id, GFP_KERNEL)) {
			pr_err("%s: OUT OF MEMORY\n", __func__);
			status = -ENOMEM;
			goto func_end;
		}

		retval = idr_get_new(ctxt->node_id, *node_res_obj,
						&(*node_res_obj)->id);
	}
	if (retval) {
		pr_err("%s: FAILED, IDR is FULL\n", __func__);
		status = -EFAULT;
	}
func_end:
	if (status)
		kfree(*node_res_obj);

	return status;
}
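
/*
 * Illustrative usage (a sketch, not copied from node.c): after a node has
 * been created, the allocator records it against the owning process
 * context roughly as follows, where hnode and pr_ctxt are assumed to be
 * the caller's node handle and process context:
 *
 *	struct node_res_object *node_res;
 *	int status;
 *
 *	status = drv_insert_node_res_element(hnode, (void *)&node_res,
 *					     (void *)pr_ctxt);
 *	if (!status)
 *		drv_proc_node_update_status(node_res, true);
 */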

/* Free the resources of a single node and delete it; the actual node
 * deallocation. Called for each ID by idr_for_each(). */
static int drv_proc_free_node_res(int id, void *p, void *data)
{
	struct process_context *ctxt = data;
	int status;
	struct node_res_object *node_res_obj = p;
	u32 node_state;

	if (node_res_obj->node_allocated) {
		node_state = node_get_state(node_res_obj->node);
		if (node_state <= NODE_DELETING) {
			if ((node_state == NODE_RUNNING) ||
			    (node_state == NODE_PAUSED) ||
			    (node_state == NODE_TERMINATING))
				node_terminate(node_res_obj->node, &status);

			node_delete(node_res_obj, ctxt);
		}
	}

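	/* Returning 0 tells idr_for_each() to continue with the next ID. */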
	return 0;
}

/* Release all Mapped and Reserved DMM resources */
int drv_remove_all_dmm_res_elements(void *process_ctxt)
{
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	struct dmm_map_object *temp_map, *map_obj;
	struct dmm_rsv_object *temp_rsv, *rsv_obj;

	/* Free DMM mapped memory resources */
	list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
		status = proc_un_map(ctxt->processor,
				     (void *)map_obj->dsp_addr, ctxt);
		if (status)
			pr_err("%s: proc_un_map failed! status = 0x%x\n",
			       __func__, status);
	}

	/* Free DMM reserved memory resources */
	list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) {
		status = proc_un_reserve_memory(ctxt->processor, (void *)
						rsv_obj->dsp_reserved_addr,
						ctxt);
		if (status)
			pr_err("%s: proc_un_reserve_memory failed! status = 0x%x\n",
			       __func__, status);
	}
	return status;
}

/* Update Node allocation status */
void drv_proc_node_update_status(void *node_resource, s32 status)
{
	struct node_res_object *node_res_obj =
	    (struct node_res_object *)node_resource;
	DBC_ASSERT(node_resource != NULL);
	node_res_obj->node_allocated = status;
}

/* Update Node Heap status */
void drv_proc_node_update_heap_status(void *node_resource, s32 status)
{
	struct node_res_object *node_res_obj =
	    (struct node_res_object *)node_resource;
	DBC_ASSERT(node_resource != NULL);
	node_res_obj->heap_allocated = status;
}

/* Release all node resources held by a process context.
 * This is called from bridge_release().
 */
int drv_remove_all_node_res_elements(void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;

	idr_for_each(ctxt->node_id, drv_proc_free_node_res, ctxt);
	idr_destroy(ctxt->node_id);

	return 0;
}
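
/*
 * A sketch of the release path (assuming a process context ctxt being
 * torn down in bridge_release()):
 *
 *	drv_remove_all_node_res_elements(ctxt);
 *	drv_remove_all_strm_res_elements(ctxt);
 *	drv_remove_all_dmm_res_elements(ctxt);
 */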

/* Allocate the STRM resource element.
 * This is called after the actual resource is allocated.
 */
int drv_proc_insert_strm_res_element(void *stream_obj,
					    void *strm_res, void *process_ctxt)
{
	struct strm_res_object **pstrm_res =
	    (struct strm_res_object **)strm_res;
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	int retval;

	*pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL);
	if (*pstrm_res == NULL) {
		status = -ENOMEM;
		goto func_end;
	}

	(*pstrm_res)->stream = stream_obj;
	retval = idr_get_new(ctxt->stream_id, *pstrm_res,
						&(*pstrm_res)->id);
	if (retval == -EAGAIN) {
		if (!idr_pre_get(ctxt->stream_id, GFP_KERNEL)) {
			pr_err("%s: OUT OF MEMORY\n", __func__);
			status = -ENOMEM;
			goto func_end;
		}

		retval = idr_get_new(ctxt->stream_id, *pstrm_res,
						&(*pstrm_res)->id);
	}
	if (retval) {
		pr_err("%s: FAILED, IDR is FULL\n", __func__);
		status = -EPERM;
	}

func_end:
	return status;
}

static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;
	struct strm_res_object *strm_res = p;
	struct stream_info strm_info;
	struct dsp_streaminfo user;
	u8 **ap_buffer = NULL;
	u8 *buf_ptr;
	u32 ul_bytes;
	u32 dw_arg;
	s32 ul_buf_size;

	if (strm_res->num_bufs) {
		ap_buffer = kmalloc((strm_res->num_bufs * sizeof(u8 *)),
				    GFP_KERNEL);
		if (ap_buffer) {
			strm_free_buffer(strm_res, ap_buffer,
					 strm_res->num_bufs, ctxt);
			kfree(ap_buffer);
		}
	}
	strm_info.user_strm = &user;
	user.number_bufs_in_stream = 0;
	strm_get_info(strm_res->stream, &strm_info, sizeof(strm_info));
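	/* Reclaim any buffers still queued on the stream before closing it. */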
	while (user.number_bufs_in_stream--)
		strm_reclaim(strm_res->stream, &buf_ptr, &ul_bytes,
			     (u32 *) &ul_buf_size, &dw_arg);
	strm_close(strm_res, ctxt);
	return 0;
}

/* Release all stream resources held by a process context.
 * This is called from bridge_release().
 */
int drv_remove_all_strm_res_elements(void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;

	idr_for_each(ctxt->stream_id, drv_proc_free_strm_res, ctxt);
	idr_destroy(ctxt->stream_id);

	return 0;
}

/* Update the stream resource element with its buffer count */
int drv_proc_update_strm_res(u32 num_bufs, void *strm_resources)
{
	int status = 0;
	struct strm_res_object **strm_res =
	    (struct strm_res_object **)strm_resources;

	(*strm_res)->num_bufs = num_bufs;
	return status;
}

/* GPP PROCESS CLEANUP CODE END */

/*
 *  ======== drv_create ========
 *  Purpose:
 *      DRV Object gets created only once during Driver Loading.
 */
int drv_create(struct drv_object **drv_obj)
{
	int status = 0;
	struct drv_object *pdrv_object = NULL;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	DBC_REQUIRE(drv_obj != NULL);
	DBC_REQUIRE(refs > 0);

	pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL);
	if (pdrv_object) {
		/* Create and Initialize List of device objects */
		INIT_LIST_HEAD(&pdrv_object->dev_list);
		INIT_LIST_HEAD(&pdrv_object->dev_node_string);
	} else {
		status = -ENOMEM;
	}
	/* Store the DRV Object in the driver data */
	if (!status) {
		if (drv_datap) {
			drv_datap->drv_object = (void *)pdrv_object;
		} else {
			status = -EPERM;
			pr_err("%s: Failed to store DRV object\n", __func__);
		}
	}

	if (!status) {
		*drv_obj = pdrv_object;
	} else {
		/* Free the DRV Object */
		kfree(pdrv_object);
	}

	DBC_ENSURE(status || pdrv_object);
	return status;
}

/*
 *  ======== drv_exit ========
 *  Purpose:
 *      Discontinue usage of the DRV module.
 */
void drv_exit(void)
{
	DBC_REQUIRE(refs > 0);

	refs--;

	DBC_ENSURE(refs >= 0);
}

/*
 *  ======== drv_destroy ========
 *  Purpose:
 *      Invoked during bridge de-initialization
 */
int drv_destroy(struct drv_object *driver_obj)
{
	int status = 0;
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(pdrv_object);

	kfree(pdrv_object);
	/* Update the DRV Object in the driver data */
	if (drv_datap) {
		drv_datap->drv_object = NULL;
	} else {
		status = -EPERM;
		pr_err("%s: Failed to store DRV object\n", __func__);
	}

	return status;
}

/*
 *  ======== drv_get_dev_object ========
 *  Purpose:
 *      Given an index, returns a handle to DevObject from the list.
 */
int drv_get_dev_object(u32 index, struct drv_object *hdrv_obj,
			      struct dev_object **device_obj)
{
	int status = 0;
#ifdef CONFIG_TIDSPBRIDGE_DEBUG
	/* used only for Assertions and debug messages */
	struct drv_object *pdrv_obj = (struct drv_object *)hdrv_obj;
#endif
	struct dev_object *dev_obj;
	u32 i;
	DBC_REQUIRE(pdrv_obj);
	DBC_REQUIRE(device_obj != NULL);
	DBC_REQUIRE(index >= 0);
	DBC_REQUIRE(refs > 0);
	DBC_ASSERT(!(list_empty(&pdrv_obj->dev_list)));

	dev_obj = (struct dev_object *)drv_get_first_dev_object();
	for (i = 0; i < index; i++) {
		dev_obj =
		    (struct dev_object *)drv_get_next_dev_object((u32) dev_obj);
	}
	if (dev_obj) {
		*device_obj = (struct dev_object *)dev_obj;
	} else {
		*device_obj = NULL;
		status = -EPERM;
	}

	return status;
}

/*
 *  ======== drv_get_first_dev_object ========
 *  Purpose:
 *      Retrieve the first Device Object handle from an internal linked list
 *      of DEV_OBJECTs maintained by DRV.
 */
u32 drv_get_first_dev_object(void)
{
	u32 dw_dev_object = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_list))
			dw_dev_object = (u32) pdrv_obj->dev_list.next;
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_dev_object;
}

/*
 *  ======== drv_get_first_dev_extension ========
 *  Purpose:
 *      Retrieve the first Device Extension from an internal linked list
 *      of pointers to dev_node strings maintained by DRV.
 */
u32 drv_get_first_dev_extension(void)
{
	u32 dw_dev_extension = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_node_string)) {
			dw_dev_extension =
			    (u32) pdrv_obj->dev_node_string.next;
		}
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_dev_extension;
}

/*
 *  ======== drv_get_next_dev_object ========
 *  Purpose:
 *      Retrieve the next Device Object handle from an internal linked list
 *      of DEV_OBJECTs maintained by DRV, after having previously called
 *      drv_get_first_dev_object() and zero or more
 *      drv_get_next_dev_object() calls.
 */
u32 drv_get_next_dev_object(u32 hdev_obj)
{
	u32 dw_next_dev_object = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);
	struct list_head *curr;

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_list)) {
			curr = (struct list_head *)hdev_obj;
			if (list_is_last(curr, &pdrv_obj->dev_list))
				return 0;
			dw_next_dev_object = (u32) curr->next;
		}
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_next_dev_object;
}

/*
 *  ======== drv_get_next_dev_extension ========
 *  Purpose:
 *      Retrieve the next Device Extension from an internal linked list
 *      of pointers to dev_node strings maintained by DRV, after having
 *      previously called drv_get_first_dev_extension() and zero or more
 *      drv_get_next_dev_extension() calls.
 */
u32 drv_get_next_dev_extension(u32 dev_extension)
{
	u32 dw_dev_extension = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);
	struct list_head *curr;

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_node_string)) {
			curr = (struct list_head *)dev_extension;
			if (list_is_last(curr, &pdrv_obj->dev_node_string))
				return 0;
			dw_dev_extension = (u32) curr->next;
		}
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_dev_extension;
}

/*
 *  ======== drv_init ========
 *  Purpose:
 *      Initialize DRV module private state.
 */
int drv_init(void)
{
	s32 ret = 1;		/* function return value */

	DBC_REQUIRE(refs >= 0);

	if (ret)
		refs++;

	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));

	return ret;
}

/*
 *  ======== drv_insert_dev_object ========
 *  Purpose:
 *      Insert a DevObject into the DRV object's list of device objects.
 */
int drv_insert_dev_object(struct drv_object *driver_obj,
				 struct dev_object *hdev_obj)
{
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(hdev_obj != NULL);
	DBC_REQUIRE(pdrv_object);

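	/*
	 * Note: the cast below assumes struct dev_object keeps its list
	 * link as its first member, so a dev_object pointer can double as
	 * a list_head pointer.
	 */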
	list_add_tail((struct list_head *)hdev_obj, &pdrv_object->dev_list);

	return 0;
}

/*
 *  ======== drv_remove_dev_object ========
 *  Purpose:
 *      Search for and remove a DeviceObject from the given list of DRV
 *      objects.
 */
int drv_remove_dev_object(struct drv_object *driver_obj,
				 struct dev_object *hdev_obj)
{
	int status = -EPERM;
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
	struct list_head *cur_elem;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(pdrv_object);
	DBC_REQUIRE(hdev_obj != NULL);

	DBC_REQUIRE(!list_empty(&pdrv_object->dev_list));

	/* Search the list for hdev_obj: */
	list_for_each(cur_elem, &pdrv_object->dev_list) {
		/* If found, remove it. */
		if ((struct dev_object *)cur_elem == hdev_obj) {
			list_del(cur_elem);
			status = 0;
			break;
		}
	}

	return status;
}

/*
 *  ======== drv_request_resources ========
 *  Purpose:
 *      Requests resources from the OS.
 */
int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
{
	int status = 0;
	struct drv_object *pdrv_object;
	struct drv_ext *pszdev_node;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	DBC_REQUIRE(dw_context != 0);
	DBC_REQUIRE(dev_node_strg != NULL);

	/*
	 *  Allocate memory to hold the string. This will live until it is
	 *  freed in drv_release_resources(). Update the driver object list.
	 */

	if (!drv_datap || !drv_datap->drv_object)
		status = -ENODATA;
	else
		pdrv_object = drv_datap->drv_object;

	if (!status) {
		pszdev_node = kzalloc(sizeof(struct drv_ext), GFP_KERNEL);
		if (pszdev_node) {
			strncpy(pszdev_node->sz_string,
				(char *)dw_context, MAXREGPATHLENGTH - 1);
			pszdev_node->sz_string[MAXREGPATHLENGTH - 1] = '\0';
			/* Update the Driver Object List */
			*dev_node_strg = (u32) pszdev_node->sz_string;
			list_add_tail(&pszdev_node->link,
					&pdrv_object->dev_node_string);
		} else {
			status = -ENOMEM;
			*dev_node_strg = 0;
		}
	} else {
		dev_dbg(bridge, "%s: Failed to get Driver Object from Registry",
			__func__);
		*dev_node_strg = 0;
	}

	DBC_ENSURE((!status && dev_node_strg != NULL &&
		    !list_empty(&pdrv_object->dev_node_string)) ||
		   (status && *dev_node_strg == 0));

	return status;
}

/*
 *  ======== drv_release_resources ========
 *  Purpose:
 *      Releases resources from the OS.
 */
int drv_release_resources(u32 dw_context, struct drv_object *hdrv_obj)
{
	int status = 0;
	struct drv_ext *pszdev_node;

	/*
	 *  Irrespective of the status, go ahead and clean up; the loop
	 *  below may overwrite the status.
	 */
	for (pszdev_node = (struct drv_ext *)drv_get_first_dev_extension();
	     pszdev_node != NULL; pszdev_node = (struct drv_ext *)
	     drv_get_next_dev_extension((u32) pszdev_node)) {
		if ((u32) pszdev_node == dw_context) {
			/* Found it */
			/* Delete from the Driver object list */
			list_del(&pszdev_node->link);
			kfree(pszdev_node);
			break;
		}
	}
	return status;
}

/*
 *  ======== request_bridge_resources ========
 *  Purpose:
 *      Reserves shared memory for bridge.
 */
static int request_bridge_resources(struct cfg_hostres *res)
{
	struct cfg_hostres *host_res = res;

	/* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
	host_res->num_mem_windows = 2;

	/* First window is for DSP internal memory */
	dev_dbg(bridge, "mem_base[0] 0x%x\n", host_res->mem_base[0]);
	dev_dbg(bridge, "mem_base[3] 0x%x\n", host_res->mem_base[3]);
	dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base);

	/* The 24xx base port does not map DSP internal memory;
	 * TODO: do an ioremap here */
	/* Second window is for DSP external memory shared with MPU */

	/* These are hard-coded values */
	host_res->birq_registers = 0;
	host_res->birq_attrib = 0;
	host_res->offset_for_monitor = 0;
	host_res->chnl_offset = 0;
	/* CHNL_MAXCHANNELS */
	host_res->num_chnls = CHNL_MAXCHANNELS;
	host_res->chnl_buf_size = 0x400;

	return 0;
}

/*
 *  ======== drv_request_bridge_res_dsp ========
 *  Purpose:
 *      Reserves shared memory for bridge.
 */
int drv_request_bridge_res_dsp(void **phost_resources)
{
	int status = 0;
	struct cfg_hostres *host_res;
	u32 dw_buff_size;
	u32 dma_addr;
	u32 shm_size;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	dw_buff_size = sizeof(struct cfg_hostres);

	host_res = kzalloc(dw_buff_size, GFP_KERNEL);

	if (host_res != NULL) {
		request_bridge_resources(host_res);
		/* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
		host_res->num_mem_windows = 4;

		host_res->mem_base[0] = 0;
		host_res->mem_base[2] = (u32) ioremap(OMAP_DSP_MEM1_BASE,
							 OMAP_DSP_MEM1_SIZE);
		host_res->mem_base[3] = (u32) ioremap(OMAP_DSP_MEM2_BASE,
							 OMAP_DSP_MEM2_SIZE);
		host_res->mem_base[4] = (u32) ioremap(OMAP_DSP_MEM3_BASE,
							 OMAP_DSP_MEM3_SIZE);
		host_res->per_base = ioremap(OMAP_PER_CM_BASE,
						OMAP_PER_CM_SIZE);
		host_res->per_pm_base = (u32) ioremap(OMAP_PER_PRM_BASE,
							 OMAP_PER_PRM_SIZE);
		host_res->core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
							  OMAP_CORE_PRM_SIZE);
		host_res->dmmu_base = ioremap(OMAP_DMMU_BASE,
						 OMAP_DMMU_SIZE);

		dev_dbg(bridge, "mem_base[0] 0x%x\n",
			host_res->mem_base[0]);
		dev_dbg(bridge, "mem_base[1] 0x%x\n",
			host_res->mem_base[1]);
		dev_dbg(bridge, "mem_base[2] 0x%x\n",
			host_res->mem_base[2]);
		dev_dbg(bridge, "mem_base[3] 0x%x\n",
			host_res->mem_base[3]);
		dev_dbg(bridge, "mem_base[4] 0x%x\n",
			host_res->mem_base[4]);
		dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base);

		shm_size = drv_datap->shm_size;
		if (shm_size >= 0x10000) {
			/* Allocate Physically contiguous,
			 * non-cacheable memory */
			host_res->mem_base[1] =
			    (u32) mem_alloc_phys_mem(shm_size, 0x100000,
						     &dma_addr);
			if (host_res->mem_base[1] == 0) {
				status = -ENOMEM;
				pr_err("shm reservation Failed\n");
			} else {
				host_res->mem_length[1] = shm_size;
				host_res->mem_phys[1] = dma_addr;

				dev_dbg(bridge, "%s: Bridge shm address 0x%x "
					"dma_addr %x size %x\n", __func__,
					host_res->mem_base[1],
					dma_addr, shm_size);
			}
		}
		if (!status) {
			/* These are hard-coded values */
			host_res->birq_registers = 0;
			host_res->birq_attrib = 0;
			host_res->offset_for_monitor = 0;
			host_res->chnl_offset = 0;
			/* CHNL_MAXCHANNELS */
			host_res->num_chnls = CHNL_MAXCHANNELS;
			host_res->chnl_buf_size = 0x400;
			dw_buff_size = sizeof(struct cfg_hostres);
		}
		*phost_resources = host_res;
	}
	/* End Mem alloc */
	return status;
}

void mem_ext_phys_pool_init(u32 pool_phys_base, u32 pool_size)
{
	u32 pool_virt_base;

	/* get the virtual address for the physical memory pool passed */
	pool_virt_base = (u32) ioremap(pool_phys_base, pool_size);

	if (!pool_virt_base) {
		pr_err("%s: external physical memory map failed\n", __func__);
		ext_phys_mem_pool_enabled = false;
	} else {
		ext_mem_pool.phys_mem_base = pool_phys_base;
		ext_mem_pool.phys_mem_size = pool_size;
		ext_mem_pool.virt_mem_base = pool_virt_base;
		ext_mem_pool.next_phys_alloc_ptr = pool_phys_base;
		ext_phys_mem_pool_enabled = true;
	}
}
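
/*
 * A hypothetical platform setup file could hand the bridge a carved-out
 * region at boot (the address and size below are made up for
 * illustration):
 *
 *	mem_ext_phys_pool_init(0x87000000, SZ_2M);
 */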

void mem_ext_phys_pool_release(void)
{
	if (ext_phys_mem_pool_enabled) {
		iounmap((void *)(ext_mem_pool.virt_mem_base));
		ext_phys_mem_pool_enabled = false;
	}
}

/*
 *  ======== mem_ext_phys_mem_alloc ========
 *  Purpose:
 *     Allocate physically contiguous, uncached memory from the external
 *     memory pool.
 */

static void *mem_ext_phys_mem_alloc(u32 bytes, u32 align, u32 *phys_addr)
{
	u32 new_alloc_ptr;
	u32 offset;
	u32 virt_addr;

	if (align == 0)
		align = 1;

	if (bytes > ((ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)
		     - ext_mem_pool.next_phys_alloc_ptr)) {
		*phys_addr = 0;
		return NULL;
	} else {
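		/* The mask arithmetic here assumes align is a power of two. */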
		offset = (ext_mem_pool.next_phys_alloc_ptr & (align - 1));
		if (offset == 0)
			new_alloc_ptr = ext_mem_pool.next_phys_alloc_ptr;
		else
			new_alloc_ptr = (ext_mem_pool.next_phys_alloc_ptr) +
			    (align - offset);
		if ((new_alloc_ptr + bytes) <=
		    (ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)) {
			/* we can allocate */
			*phys_addr = new_alloc_ptr;
			ext_mem_pool.next_phys_alloc_ptr =
			    new_alloc_ptr + bytes;
			virt_addr = ext_mem_pool.virt_mem_base +
			    (new_alloc_ptr - ext_mem_pool.phys_mem_base);
			return (void *)virt_addr;
		} else {
			*phys_addr = 0;
			return NULL;
		}
	}
}

/*
 *  ======== mem_alloc_phys_mem ========
 *  Purpose:
 *      Allocate physically contiguous, uncached memory
 */
void *mem_alloc_phys_mem(u32 byte_size, u32 align_mask,
				u32 *physical_address)
{
	void *va_mem = NULL;
	dma_addr_t pa_mem;

	if (byte_size > 0) {
		if (ext_phys_mem_pool_enabled) {
			va_mem = mem_ext_phys_mem_alloc(byte_size, align_mask,
							(u32 *) &pa_mem);
		} else {
			va_mem = dma_alloc_coherent(NULL, byte_size, &pa_mem,
								GFP_KERNEL);
		}
		if (va_mem == NULL)
			*physical_address = 0;
		else
			*physical_address = pa_mem;
	}
	return va_mem;
}
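
/*
 * Allocation and free must stay paired; a minimal sketch, assuming the
 * caller keeps both the virtual and physical addresses:
 *
 *	u32 pa;
 *	void *va = mem_alloc_phys_mem(0x1000, 0x1000, &pa);
 *
 *	if (va)
 *		mem_free_phys_mem(va, pa, 0x1000);
 *
 * Note that blocks handed out by the external pool are never returned to
 * it; mem_free_phys_mem() only releases dma_alloc_coherent() memory.
 */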

/*
 *  ======== mem_free_phys_mem ========
 *  Purpose:
 *      Free the given block of physically contiguous memory.
 */
void mem_free_phys_mem(void *virtual_address, u32 physical_address,
		       u32 byte_size)
{
	DBC_REQUIRE(virtual_address != NULL);

	if (!ext_phys_mem_pool_enabled)
		dma_free_coherent(NULL, byte_size, virtual_address,
				  physical_address);
}