/*
 * drv.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * DSP/BIOS Bridge resource allocation module.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>
#include <linux/list.h>

/*  ----------------------------------- Host OS */
#include <dspbridge/host_os.h>

/*  ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/*  ----------------------------------- This */
#include <dspbridge/drv.h>
#include <dspbridge/dev.h>

#include <dspbridge/node.h>
#include <dspbridge/proc.h>
#include <dspbridge/strm.h>
#include <dspbridge/nodepriv.h>
#include <dspbridge/dspchnl.h>
#include <dspbridge/resourcecleanup.h>

/*  ----------------------------------- Defines, Data Structures, Typedefs */
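/*
 *  DRV object: created once at driver load by drv_create(). dev_list links
 *  the DEV_OBJECTs registered via drv_insert_dev_object(); dev_node_string
 *  links the struct drv_ext device-node strings added by
 *  drv_request_resources().
 */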
struct drv_object {
	struct list_head dev_list;
	struct list_head dev_node_string;
};

/*
 *  This is the Device Extension. Named with the prefix
 *  DRV_ since it lives in this module.
 */
struct drv_ext {
	struct list_head link;
	char sz_string[MAXREGPATHLENGTH];
};

/*  ----------------------------------- Globals */
static bool ext_phys_mem_pool_enabled;
struct ext_phys_mem_pool {
	u32 phys_mem_base;
	u32 phys_mem_size;
	u32 virt_mem_base;
	u32 next_phys_alloc_ptr;
};
static struct ext_phys_mem_pool ext_mem_pool;

/*  ----------------------------------- Function Prototypes */
static int request_bridge_resources(struct cfg_hostres *res);


/* GPP PROCESS CLEANUP CODE */

static int drv_proc_free_node_res(int id, void *p, void *data);

/* Allocate and add a node resource element
 * This function is called from .Node_Allocate. */
int drv_insert_node_res_element(void *hnode, void *node_resource,
				       void *process_ctxt)
{
	struct node_res_object **node_res_obj =
	    (struct node_res_object **)node_resource;
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	int retval;

	*node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL);
	if (!*node_res_obj) {
		status = -ENOMEM;
		goto func_end;
	}

	(*node_res_obj)->node = hnode;
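	/*
	 * Try to get an id for the new node resource. If the idr has no
	 * preallocated memory (-EAGAIN), preallocate and retry once.
	 */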
	retval = idr_get_new(ctxt->node_id, *node_res_obj,
						&(*node_res_obj)->id);
	if (retval == -EAGAIN) {
		if (!idr_pre_get(ctxt->node_id, GFP_KERNEL)) {
			pr_err("%s: OUT OF MEMORY\n", __func__);
			status = -ENOMEM;
			goto func_end;
		}

		retval = idr_get_new(ctxt->node_id, *node_res_obj,
						&(*node_res_obj)->id);
	}
	if (retval) {
		pr_err("%s: FAILED, IDR is FULL\n", __func__);
		status = -EFAULT;
	}
func_end:
	if (status)
		kfree(*node_res_obj);

	return status;
}

/* Release all Node resources and its context
 * Actual Node De-Allocation */
static int drv_proc_free_node_res(int id, void *p, void *data)
{
	struct process_context *ctxt = data;
	int status;
	struct node_res_object *node_res_obj = p;
	u32 node_state;

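	/*
	 * Only nodes still allocated to this process are cleaned up here.
	 * Running, paused or terminating nodes are terminated first; any
	 * node that has not gone past NODE_DELETING is then deleted.
	 */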
	if (node_res_obj->node_allocated) {
		node_state = node_get_state(node_res_obj->node);
		if (node_state <= NODE_DELETING) {
			if ((node_state == NODE_RUNNING) ||
			    (node_state == NODE_PAUSED) ||
			    (node_state == NODE_TERMINATING))
				node_terminate(node_res_obj->node, &status);

			node_delete(node_res_obj, ctxt);
		}
	}

	return 0;
}

/* Release all Mapped and Reserved DMM resources */
int drv_remove_all_dmm_res_elements(void *process_ctxt)
{
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	struct dmm_map_object *temp_map, *map_obj;
	struct dmm_rsv_object *temp_rsv, *rsv_obj;

	/* Free DMM mapped memory resources */
	list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
		status = proc_un_map(ctxt->processor,
				     (void *)map_obj->dsp_addr, ctxt);
		if (status)
			pr_err("%s: proc_un_map failed! status = 0x%x\n",
			       __func__, status);
	}

	/* Free DMM reserved memory resources */
	list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) {
		status = proc_un_reserve_memory(ctxt->processor, (void *)
						rsv_obj->dsp_reserved_addr,
						ctxt);
		if (status)
			pr_err("%s: proc_un_reserve_memory failed! status = 0x%x\n",
			       __func__, status);
	}
	return status;
}

/* Update Node allocation status */
void drv_proc_node_update_status(void *node_resource, s32 status)
{
	struct node_res_object *node_res_obj =
	    (struct node_res_object *)node_resource;
	node_res_obj->node_allocated = status;
}

/* Update Node Heap status */
void drv_proc_node_update_heap_status(void *node_resource, s32 status)
{
	struct node_res_object *node_res_obj =
	    (struct node_res_object *)node_resource;
	node_res_obj->heap_allocated = status;
}

/* Release all Node resources and its context
 * This is called from .bridge_release.
 */
int drv_remove_all_node_res_elements(void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;

	idr_for_each(ctxt->node_id, drv_proc_free_node_res, ctxt);
	idr_destroy(ctxt->node_id);

	return 0;
}


/* Allocate the STRM resource element
 * This is called after the actual resource is allocated
 */
int drv_proc_insert_strm_res_element(void *stream_obj,
					    void *strm_res, void *process_ctxt)
{
	struct strm_res_object **pstrm_res =
	    (struct strm_res_object **)strm_res;
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	int retval;

	*pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL);
	if (*pstrm_res == NULL) {
		status = -ENOMEM;
		goto func_end;
	}

	(*pstrm_res)->stream = stream_obj;
	retval = idr_get_new(ctxt->stream_id, *pstrm_res,
						&(*pstrm_res)->id);
	if (retval == -EAGAIN) {
		if (!idr_pre_get(ctxt->stream_id, GFP_KERNEL)) {
			pr_err("%s: OUT OF MEMORY\n", __func__);
			status = -ENOMEM;
			goto func_end;
		}

		retval = idr_get_new(ctxt->stream_id, *pstrm_res,
						&(*pstrm_res)->id);
	}
	if (retval) {
		pr_err("%s: FAILED, IDR is FULL\n", __func__);
		status = -EPERM;
	}

func_end:
	return status;
}

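/* Release a single STRM resource element: free any buffers allocated on the
 * stream, reclaim buffers still outstanding on it, then close the stream. */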
static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;
	struct strm_res_object *strm_res = p;
	struct stream_info strm_info;
	struct dsp_streaminfo user;
	u8 **ap_buffer = NULL;
	u8 *buf_ptr;
	u32 ul_bytes;
	u32 dw_arg;
	s32 ul_buf_size;

	if (strm_res->num_bufs) {
		ap_buffer = kmalloc((strm_res->num_bufs *
				       sizeof(u8 *)), GFP_KERNEL);
		if (ap_buffer) {
			strm_free_buffer(strm_res,
						  ap_buffer,
						  strm_res->num_bufs,
						  ctxt);
			kfree(ap_buffer);
		}
	}
	strm_info.user_strm = &user;
	user.number_bufs_in_stream = 0;
	strm_get_info(strm_res->stream, &strm_info, sizeof(strm_info));
	while (user.number_bufs_in_stream--)
		strm_reclaim(strm_res->stream, &buf_ptr, &ul_bytes,
			     (u32 *) &ul_buf_size, &dw_arg);
	strm_close(strm_res, ctxt);
	return 0;
}

/* Release all Stream resources and its context
 * This is called from .bridge_release.
 */
int drv_remove_all_strm_res_elements(void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;

	idr_for_each(ctxt->stream_id, drv_proc_free_strm_res, ctxt);
	idr_destroy(ctxt->stream_id);

	return 0;
}

/* Update the stream resource element */
int drv_proc_update_strm_res(u32 num_bufs, void *strm_resources)
{
	int status = 0;
	struct strm_res_object **strm_res =
	    (struct strm_res_object **)strm_resources;

	(*strm_res)->num_bufs = num_bufs;
	return status;
}

/* GPP PROCESS CLEANUP CODE END */

/*
 *  ======== drv_create ========
 *  Purpose:
 *      DRV Object gets created only once during Driver Loading.
 */
int drv_create(struct drv_object **drv_obj)
{
	int status = 0;
	struct drv_object *pdrv_object = NULL;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL);
	if (pdrv_object) {
		/* Create and Initialize List of device objects */
		INIT_LIST_HEAD(&pdrv_object->dev_list);
		INIT_LIST_HEAD(&pdrv_object->dev_node_string);
	} else {
		status = -ENOMEM;
	}
	/* Store the DRV Object in the driver data */
	if (!status) {
		if (drv_datap) {
			drv_datap->drv_object = (void *)pdrv_object;
		} else {
			status = -EPERM;
			pr_err("%s: Failed to store DRV object\n", __func__);
		}
	}

	if (!status) {
		*drv_obj = pdrv_object;
	} else {
		/* Free the DRV Object */
		kfree(pdrv_object);
	}

	return status;
}

/*
 *  ======== drv_destroy ========
 *  Purpose:
 *      Invoked during bridge de-initialization
 */
int drv_destroy(struct drv_object *driver_obj)
{
	int status = 0;
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	kfree(pdrv_object);
	/* Update the DRV Object in the driver data */
	if (drv_datap) {
		drv_datap->drv_object = NULL;
	} else {
		status = -EPERM;
		pr_err("%s: Failed to store DRV object\n", __func__);
	}

	return status;
}

/*
 *  ======== drv_get_dev_object ========
 *  Purpose:
 *      Given an index, returns a handle to DevObject from the list.
 */
int drv_get_dev_object(u32 index, struct drv_object *hdrv_obj,
			      struct dev_object **device_obj)
{
	int status = 0;
	struct dev_object *dev_obj;
	u32 i;

	dev_obj = (struct dev_object *)drv_get_first_dev_object();
	for (i = 0; i < index; i++) {
		dev_obj =
		    (struct dev_object *)drv_get_next_dev_object((u32) dev_obj);
	}
	if (dev_obj) {
		*device_obj = (struct dev_object *)dev_obj;
	} else {
		*device_obj = NULL;
		status = -EPERM;
	}

	return status;
}

/*
 *  ======== drv_get_first_dev_object ========
 *  Purpose:
 *      Retrieve the first Device Object handle from an internal linked list
 *      of DEV_OBJECTs maintained by DRV.
 */
u32 drv_get_first_dev_object(void)
{
	u32 dw_dev_object = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_list))
			dw_dev_object = (u32) pdrv_obj->dev_list.next;
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_dev_object;
}

/*
 *  ======== drv_get_first_dev_extension ========
 *  Purpose:
 *      Retrieve the first Device Extension from an internal linked list
 *      of pointers to dev_node strings maintained by DRV.
 */
u32 drv_get_first_dev_extension(void)
{
	u32 dw_dev_extension = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_node_string)) {
			dw_dev_extension =
			    (u32) pdrv_obj->dev_node_string.next;
		}
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_dev_extension;
}

/*
 *  ======== drv_get_next_dev_object ========
 *  Purpose:
 *      Retrieve the next Device Object handle from an internal linked list
 *      of DEV_OBJECTs maintained by DRV, after having previously called
 *      drv_get_first_dev_object() and zero or more calls to
 *      drv_get_next_dev_object().
 */
u32 drv_get_next_dev_object(u32 hdev_obj)
{
	u32 dw_next_dev_object = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);
	struct list_head *curr;

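	/*
	 * Device object handles handed out by this module are pointers to
	 * the list_head embedded at the start of each DEV_OBJECT, so the
	 * next handle is simply curr->next.
	 */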
	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_list)) {
			curr = (struct list_head *)hdev_obj;
			if (list_is_last(curr, &pdrv_obj->dev_list))
				return 0;
			dw_next_dev_object = (u32) curr->next;
		}
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_next_dev_object;
}

/*
 *  ======== drv_get_next_dev_extension ========
 *  Purpose:
 *      Retrieve the next Device Extension from an internal linked list
 *      of pointers to dev_node strings maintained by DRV, after having
 *      previously called drv_get_first_dev_extension() and zero or more
 *      drv_get_next_dev_extension().
 */
u32 drv_get_next_dev_extension(u32 dev_extension)
{
	u32 dw_dev_extension = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);
	struct list_head *curr;

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_node_string)) {
			curr = (struct list_head *)dev_extension;
			if (list_is_last(curr, &pdrv_obj->dev_node_string))
				return 0;
			dw_dev_extension = (u32) curr->next;
		}
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_dev_extension;
}

/*
 *  ======== drv_insert_dev_object ========
 *  Purpose:
 *      Insert a DevObject into the DRV object's list of device objects.
 */
int drv_insert_dev_object(struct drv_object *driver_obj,
				 struct dev_object *hdev_obj)
{
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;

	list_add_tail((struct list_head *)hdev_obj, &pdrv_object->dev_list);

	return 0;
}

/*
 *  ======== drv_remove_dev_object ========
 *  Purpose:
 *      Search for and remove a DeviceObject from the given list of DRV
 *      objects.
 */
int drv_remove_dev_object(struct drv_object *driver_obj,
				 struct dev_object *hdev_obj)
{
	int status = -EPERM;
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
	struct list_head *cur_elem;

	/* Search the list for hdev_obj: */
	list_for_each(cur_elem, &pdrv_object->dev_list) {
		/* If found, remove it. */
		if ((struct dev_object *)cur_elem == hdev_obj) {
			list_del(cur_elem);
			status = 0;
			break;
		}
	}

	return status;
}

/*
 *  ======== drv_request_resources ========
 *  Purpose:
 *      Requests resources from the OS.
 */
int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
{
	int status = 0;
	struct drv_object *pdrv_object;
	struct drv_ext *pszdev_node;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	/*
	 *  Allocate memory to hold the string. This will live until
	 *  it is freed in the Release resources. Update the driver object
	 *  list.
	 */

	if (!drv_datap || !drv_datap->drv_object)
		status = -ENODATA;
	else
		pdrv_object = drv_datap->drv_object;

	if (!status) {
		pszdev_node = kzalloc(sizeof(struct drv_ext), GFP_KERNEL);
		if (pszdev_node) {
			strncpy(pszdev_node->sz_string,
				(char *)dw_context, MAXREGPATHLENGTH - 1);
			pszdev_node->sz_string[MAXREGPATHLENGTH - 1] = '\0';
			/* Update the Driver Object List */
			*dev_node_strg = (u32) pszdev_node->sz_string;
			list_add_tail(&pszdev_node->link,
					&pdrv_object->dev_node_string);
		} else {
			status = -ENOMEM;
			*dev_node_strg = 0;
		}
	} else {
		dev_dbg(bridge, "%s: Failed to get Driver Object from Registry",
			__func__);
		*dev_node_strg = 0;
	}

	return status;
}

/*
 *  ======== drv_release_resources ========
 *  Purpose:
 *      Releases resources from the OS.
 */
int drv_release_resources(u32 dw_context, struct drv_object *hdrv_obj)
{
	int status = 0;
	struct drv_ext *pszdev_node;

	/*
	 *  Irrespective of the status, go ahead and clean up: walk the
	 *  device extension list and free the entry matching dw_context.
	 */
	for (pszdev_node = (struct drv_ext *)drv_get_first_dev_extension();
	     pszdev_node != NULL; pszdev_node = (struct drv_ext *)
	     drv_get_next_dev_extension((u32) pszdev_node)) {
		if ((u32) pszdev_node == dw_context) {
			/* Found it */
			/* Delete from the Driver object list */
			list_del(&pszdev_node->link);
			kfree(pszdev_node);
			break;
		}
	}
	return status;
}

/*
 *  ======== request_bridge_resources ========
 *  Purpose:
 *      Reserves shared memory for bridge.
 */
static int request_bridge_resources(struct cfg_hostres *res)
{
	struct cfg_hostres *host_res = res;

	/* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
	host_res->num_mem_windows = 2;

	/* First window is for DSP internal memory */
	dev_dbg(bridge, "mem_base[0] 0x%x\n", host_res->mem_base[0]);
	dev_dbg(bridge, "mem_base[3] 0x%x\n", host_res->mem_base[3]);
	dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base);

	/* For 24xx the base port does not map the DSP internal memory.
	 * TODO: do an ioremap here. */
	/* Second window is for DSP external memory shared with MPU */

	/* These are hard-coded values */
	host_res->birq_registers = 0;
	host_res->birq_attrib = 0;
	host_res->offset_for_monitor = 0;
	host_res->chnl_offset = 0;
	/* CHNL_MAXCHANNELS */
	host_res->num_chnls = CHNL_MAXCHANNELS;
	host_res->chnl_buf_size = 0x400;

	return 0;
}

/*
 *  ======== drv_request_bridge_res_dsp ========
 *  Purpose:
 *      Reserves shared memory for bridge.
 */
int drv_request_bridge_res_dsp(void **phost_resources)
{
	int status = 0;
	struct cfg_hostres *host_res;
	u32 dw_buff_size;
	u32 dma_addr;
	u32 shm_size;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	dw_buff_size = sizeof(struct cfg_hostres);

	host_res = kzalloc(dw_buff_size, GFP_KERNEL);

	if (host_res != NULL) {
		request_bridge_resources(host_res);
		/* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
		host_res->num_mem_windows = 4;

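		/*
		 * Window 0 is left unused here, window 1 is filled in below
		 * with the allocated shared memory, and windows 2-4 are the
		 * ioremapped DSP memory regions.
		 */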
		host_res->mem_base[0] = 0;
		host_res->mem_base[2] = (u32) ioremap(OMAP_DSP_MEM1_BASE,
							 OMAP_DSP_MEM1_SIZE);
		host_res->mem_base[3] = (u32) ioremap(OMAP_DSP_MEM2_BASE,
							 OMAP_DSP_MEM2_SIZE);
		host_res->mem_base[4] = (u32) ioremap(OMAP_DSP_MEM3_BASE,
							 OMAP_DSP_MEM3_SIZE);
		host_res->per_base = ioremap(OMAP_PER_CM_BASE,
						OMAP_PER_CM_SIZE);
		host_res->per_pm_base = (u32) ioremap(OMAP_PER_PRM_BASE,
							 OMAP_PER_PRM_SIZE);
		host_res->core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
							  OMAP_CORE_PRM_SIZE);
		host_res->dmmu_base = ioremap(OMAP_DMMU_BASE,
						 OMAP_DMMU_SIZE);

		dev_dbg(bridge, "mem_base[0] 0x%x\n",
			host_res->mem_base[0]);
		dev_dbg(bridge, "mem_base[1] 0x%x\n",
			host_res->mem_base[1]);
		dev_dbg(bridge, "mem_base[2] 0x%x\n",
			host_res->mem_base[2]);
		dev_dbg(bridge, "mem_base[3] 0x%x\n",
			host_res->mem_base[3]);
		dev_dbg(bridge, "mem_base[4] 0x%x\n",
			host_res->mem_base[4]);
		dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base);

		shm_size = drv_datap->shm_size;
		if (shm_size >= 0x10000) {
			/* Allocate Physically contiguous,
			 * non-cacheable memory */
			host_res->mem_base[1] =
			    (u32) mem_alloc_phys_mem(shm_size, 0x100000,
						     &dma_addr);
			if (host_res->mem_base[1] == 0) {
				status = -ENOMEM;
				pr_err("shm reservation Failed\n");
			} else {
				host_res->mem_length[1] = shm_size;
				host_res->mem_phys[1] = dma_addr;

				dev_dbg(bridge, "%s: Bridge shm address 0x%x "
					"dma_addr %x size %x\n", __func__,
					host_res->mem_base[1],
					dma_addr, shm_size);
			}
		}
		if (!status) {
			/* These are hard-coded values */
			host_res->birq_registers = 0;
			host_res->birq_attrib = 0;
			host_res->offset_for_monitor = 0;
			host_res->chnl_offset = 0;
			/* CHNL_MAXCHANNELS */
			host_res->num_chnls = CHNL_MAXCHANNELS;
			host_res->chnl_buf_size = 0x400;
			dw_buff_size = sizeof(struct cfg_hostres);
		}
		*phost_resources = host_res;
	}
	/* End Mem alloc */
	return status;
}

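/*
 *  ======== mem_ext_phys_pool_init ========
 *  Purpose:
 *      Map an external physical memory pool and enable allocations from it.
 */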
void mem_ext_phys_pool_init(u32 pool_phys_base, u32 pool_size)
{
	u32 pool_virt_base;

	/* get the virtual address for the physical memory pool passed */
	pool_virt_base = (u32) ioremap(pool_phys_base, pool_size);

	if (!pool_virt_base) {
		pr_err("%s: external physical memory map failed\n", __func__);
		ext_phys_mem_pool_enabled = false;
	} else {
		ext_mem_pool.phys_mem_base = pool_phys_base;
		ext_mem_pool.phys_mem_size = pool_size;
		ext_mem_pool.virt_mem_base = pool_virt_base;
		ext_mem_pool.next_phys_alloc_ptr = pool_phys_base;
		ext_phys_mem_pool_enabled = true;
	}
}

void mem_ext_phys_pool_release(void)
{
	if (ext_phys_mem_pool_enabled) {
		iounmap((void *)(ext_mem_pool.virt_mem_base));
		ext_phys_mem_pool_enabled = false;
	}
}

/*
 *  ======== mem_ext_phys_mem_alloc ========
 *  Purpose:
 *     Allocate physically contiguous, uncached memory from external memory pool
 */

static void *mem_ext_phys_mem_alloc(u32 bytes, u32 align, u32 *phys_addr)
{
	u32 new_alloc_ptr;
	u32 offset;
	u32 virt_addr;

	if (align == 0)
		align = 1;

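	/*
	 * Simple bump-pointer allocator: align next_phys_alloc_ptr up to the
	 * requested alignment and hand out the next 'bytes' bytes if they
	 * still fit within the pool.
	 */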
	if (bytes > ((ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)
		     - ext_mem_pool.next_phys_alloc_ptr)) {
		*phys_addr = 0;
		return NULL;
	} else {
		offset = (ext_mem_pool.next_phys_alloc_ptr & (align - 1));
		if (offset == 0)
			new_alloc_ptr = ext_mem_pool.next_phys_alloc_ptr;
		else
			new_alloc_ptr = (ext_mem_pool.next_phys_alloc_ptr) +
			    (align - offset);
		if ((new_alloc_ptr + bytes) <=
		    (ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)) {
			/* we can allocate */
			*phys_addr = new_alloc_ptr;
			ext_mem_pool.next_phys_alloc_ptr =
			    new_alloc_ptr + bytes;
			virt_addr =
			    ext_mem_pool.virt_mem_base + (new_alloc_ptr -
							  ext_mem_pool.
							  phys_mem_base);
			return (void *)virt_addr;
		} else {
			*phys_addr = 0;
			return NULL;
		}
	}
}

/*
 *  ======== mem_alloc_phys_mem ========
 *  Purpose:
 *      Allocate physically contiguous, uncached memory
 */
void *mem_alloc_phys_mem(u32 byte_size, u32 align_mask,
				u32 *physical_address)
{
	void *va_mem = NULL;
	dma_addr_t pa_mem;

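	/*
	 * Use the reserved external pool when one was configured at init
	 * time; otherwise fall back to coherent DMA memory.
	 */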
	if (byte_size > 0) {
		if (ext_phys_mem_pool_enabled) {
			va_mem = mem_ext_phys_mem_alloc(byte_size, align_mask,
							(u32 *) &pa_mem);
		} else {
			va_mem = dma_alloc_coherent(NULL, byte_size, &pa_mem,
								GFP_KERNEL);
		}
		if (va_mem == NULL)
			*physical_address = 0;
		else
			*physical_address = pa_mem;
	}
	return va_mem;
}

/*
 *  ======== mem_free_phys_mem ========
 *  Purpose:
 *      Free the given block of physically contiguous memory.
 */
void mem_free_phys_mem(void *virtual_address, u32 physical_address,
		       u32 byte_size)
{
	if (!ext_phys_mem_pool_enabled)
		dma_free_coherent(NULL, byte_size, virtual_address,
				  physical_address);
}