/*
 * cmm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * The Communication (Shared) Memory Management (CMM) module provides
 * shared memory management services for DSP/BIOS Bridge data streaming
 * and messaging.
 *
 * Multiple shared memory segments can be registered with CMM.
 * Each registered SM segment is represented by a SM "allocator" that
 * describes a block of physically contiguous shared memory used for
 * future allocations by CMM.
 *
 * Memory is coalesced back to the appropriate heap when a buffer is
 * freed.
 *
 * Notes:
 *   Va: Virtual address.
 *   Pa: Physical or kernel system address.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>
#include <linux/list.h>

/*  ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/*  ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/*  ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>

/*  ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/proc.h>

/*  ----------------------------------- This */
#include <dspbridge/cmm.h>

/*  ----------------------------------- Defines, Data Structures, Typedefs */
#define NEXT_PA(pnode)   (pnode->pa + pnode->size)

/* Other bus/platform translations */
#define DSPPA2GPPPA(base, x, y)  ((x)+(y))
#define GPPPA2DSPPA(base, x, y)  ((x)-(y))
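
/*
 * Illustrative sketch only (addresses are hypothetical): if a segment is
 * registered with dsp_phys_addr_offset = 0x10000000 and c_factor =
 * CMM_ADDTODSPPA (assumed here to evaluate to +1), then
 *
 *	DSPPA2GPPPA(base, 0x77000000, 0x10000000) == 0x87000000
 *	GPPPA2DSPPA(base, 0x87000000, 0x10000000) == 0x77000000
 *
 * i.e. the offset is added to a DSP Pa to obtain the corresponding GPP Pa
 * and subtracted to go the other way; 'base' is currently unused by both
 * macros. cmm_xlator_translate() below passes offset * c_factor as 'y'.
 */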

/*
 *  Allocators define a block of contiguous memory used for future allocations.
 *
 *      sma - shared memory allocator.
 *      vma - virtual memory allocator (not used).
 */
struct cmm_allocator {		/* sma */
	unsigned int shm_base;	/* Start of physical SM block */
	u32 sm_size;		/* Size of SM block in bytes */
	unsigned int vm_base;	/* Start of VM block. (Dev driver
				 * context for 'sma') */
	u32 dsp_phys_addr_offset;	/* DSP PA to GPP PA offset for this
					 * SM space */
	s8 c_factor;		/* DSPPa to GPPPa Conversion Factor */
	unsigned int dsp_base;	/* DSP virt base byte address */
	u32 dsp_size;	/* DSP seg size in bytes */
	struct cmm_object *cmm_mgr;	/* back ref to parent mgr */
	/* node list of available memory */
	struct list_head free_list;
	/* node list of memory in use */
	struct list_head in_use_list;
};
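
/*
 * Rough layout of one registered segment, as implied by the address math in
 * cmm_get_info() and cmm_xlator_translate() below (widths illustrative):
 *
 *	seg_base_pa = shm_base - dsp_size
 *	|<------ dsp_size ------>|<-------- sm_size -------->|
 *	+------------------------+---------------------------+
 *	| DSP-reserved range     | GPP SM heap (allocations)  |
 *	+------------------------+---------------------------+
 *	                         ^ shm_base (gpp_base_pa)
 */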

struct cmm_xlator {		/* Pa<->Va translator object */
	/* CMM object this translator is associated with */
	struct cmm_object *cmm_mgr;
	/*
	 *  Client process virtual base address that corresponds to phys SM
	 *  base address for translator's seg_id.
	 *  Only 1 segment ID currently supported.
	 */
	unsigned int virt_base;	/* virtual base address */
	u32 virt_size;		/* size of virt space in bytes */
	u32 seg_id;		/* Segment Id */
};

/* CMM Mgr */
struct cmm_object {
	/*
	 * Cmm Lock is used to serialize access to the mem manager across
	 * multiple threads.
	 */
	struct mutex cmm_lock;	/* Lock to access cmm mgr */
	struct list_head node_free_list;	/* Free list of memory nodes */
	u32 min_block_size;	/* Min SM block; default 16 bytes */
	u32 page_size;	/* Memory Page size (1k/4k) */
	/* GPP SM segment ptrs */
	struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS];
};

/* Default CMM Mgr attributes */
static struct cmm_mgrattrs cmm_dfltmgrattrs = {
	/* min_block_size, min block size(bytes) allocated by cmm mgr */
	16
};

/* Default allocation attributes */
static struct cmm_attrs cmm_dfltalctattrs = {
	1		/* seg_id, default segment Id for allocator */
};

/* Address translator default attrs */
static struct cmm_xlatorattrs cmm_dfltxlatorattrs = {
	/* seg_id, does not have to match cmm_dfltalctattrs ul_seg_id */
	1,
	0,			/* dsp_bufs */
	0,			/* dsp_buf_size */
	NULL,			/* vm_base */
	0,			/* vm_size */
};

/* SM node representing a block of memory. */
struct cmm_mnode {
	struct list_head link;	/* must be 1st element */
	u32 pa;		/* Phys addr */
	u32 va;			/* Virtual address in device process context */
	u32 size;		/* SM block size in bytes */
	u32 client_proc;	/* Process that allocated this mem block */
};

/*  ----------------------------------- Globals */
static u32 refs;		/* module reference count */

/*  ----------------------------------- Function Prototypes */
static void add_to_free_list(struct cmm_allocator *allocator,
			     struct cmm_mnode *pnode);
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
					   u32 ul_seg_id);
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
					u32 usize);
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
				  u32 dw_va, u32 ul_size);
/* get available slot for new allocator */
static s32 get_slot(struct cmm_object *cmm_mgr_obj);
static void un_register_gppsm_seg(struct cmm_allocator *psma);

/*
 *  ======== cmm_calloc_buf ========
 *  Purpose:
 *      Allocate a SM buffer, zero contents, and return the physical address
 *      and optional driver context virtual address (pp_buf_va).
 *
 *      The freelist is sorted in increasing size order. Get the first
 *      block that satisfies the request and return the remainder to the
 *      freelist if it is large enough. The kept block is placed on the
 *      inUseList.
 */
void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
		     struct cmm_attrs *pattrs, void **pp_buf_va)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	void *buf_pa = NULL;
	struct cmm_mnode *pnode = NULL;
	struct cmm_mnode *new_node = NULL;
	struct cmm_allocator *allocator = NULL;
	u32 delta_size;
	u8 *pbyte = NULL;
	s32 cnt;

	if (pattrs == NULL)
		pattrs = &cmm_dfltalctattrs;

	if (pp_buf_va != NULL)
		*pp_buf_va = NULL;

	if (cmm_mgr_obj && (usize != 0)) {
		if (pattrs->seg_id > 0) {
			/* SegId > 0 is SM */
			/* get the allocator object for this segment id */
			allocator =
			    get_allocator(cmm_mgr_obj, pattrs->seg_id);
			/* keep block size a multiple of min_block_size */
			usize =
			    ((usize - 1) & ~(cmm_mgr_obj->min_block_size -
					     1))
			    + cmm_mgr_obj->min_block_size;
			mutex_lock(&cmm_mgr_obj->cmm_lock);
			pnode = get_free_block(allocator, usize);
		}
		if (pnode) {
			delta_size = (pnode->size - usize);
			if (delta_size >= cmm_mgr_obj->min_block_size) {
				/* create a new block with the leftovers and
				 * add to freelist */
				new_node =
				    get_node(cmm_mgr_obj, pnode->pa + usize,
					     pnode->va + usize,
					     (u32) delta_size);
				/* leftovers go free */
				add_to_free_list(allocator, new_node);
				/* adjust our node's size */
				pnode->size = usize;
			}
			/* Tag node with client process requesting allocation
			 * We'll need to free up a process's alloc'd SM if the
			 * client process goes away.
			 */
			/* Return TGID instead of process handle */
			pnode->client_proc = current->tgid;

			/* put our node on InUse list */
			list_add_tail(&pnode->link, &allocator->in_use_list);
			buf_pa = (void *)pnode->pa;	/* physical address */
			/* clear mem */
			pbyte = (u8 *) pnode->va;
			for (cnt = 0; cnt < (s32) usize; cnt++, pbyte++)
				*pbyte = 0;

			if (pp_buf_va != NULL) {
				/* Virtual address */
				*pp_buf_va = (void *)pnode->va;
			}
		}
		/* only drop the lock if it was actually taken above */
		if (pattrs->seg_id > 0)
			mutex_unlock(&cmm_mgr_obj->cmm_lock);
	}
	return buf_pa;
}
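
/*
 * Typical use, sketched for illustration only (cmm_mgr is a manager obtained
 * via cmm_get_handle(); the size below is hypothetical):
 *
 *	void *va;
 *	void *pa = cmm_calloc_buf(cmm_mgr, 0x100, NULL, &va);
 *
 *	if (pa) {
 *		... use the zeroed buffer through va / pa ...
 *		cmm_free_buf(cmm_mgr, pa, 0);
 *	}
 *
 * Passing NULL attrs (and seg_id 0 to cmm_free_buf) selects the default SM
 * segment described by cmm_dfltalctattrs.
 */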

/*
 *  ======== cmm_create ========
 *  Purpose:
 *      Create a communication memory manager object.
 */
int cmm_create(struct cmm_object **ph_cmm_mgr,
		      struct dev_object *hdev_obj,
		      const struct cmm_mgrattrs *mgr_attrts)
{
	struct cmm_object *cmm_obj = NULL;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(ph_cmm_mgr != NULL);

	*ph_cmm_mgr = NULL;
	/* create, zero, and tag a cmm mgr object */
	cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL);
	if (!cmm_obj)
		return -ENOMEM;

	if (mgr_attrts == NULL)
		mgr_attrts = &cmm_dfltmgrattrs;	/* set defaults */

	/* 4 bytes minimum */
	DBC_ASSERT(mgr_attrts->min_block_size >= 4);
	/* save away smallest block allocation for this cmm mgr */
	cmm_obj->min_block_size = mgr_attrts->min_block_size;
	cmm_obj->page_size = PAGE_SIZE;

	/* create node free list */
	INIT_LIST_HEAD(&cmm_obj->node_free_list);
	mutex_init(&cmm_obj->cmm_lock);
	*ph_cmm_mgr = cmm_obj;

	return status;
}

/*
 *  ======== cmm_destroy ========
 *  Purpose:
 *      Release the communication memory manager resources.
 */
int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	struct cmm_info temp_info;
	int status = 0;
	s32 slot_seg;
	struct cmm_mnode *node, *tmp;

	DBC_REQUIRE(refs > 0);
	if (!hcmm_mgr) {
		status = -EFAULT;
		return status;
	}
	/* If not force then fail if outstanding allocations exist */
	if (!force) {
		/*
		 * Check for outstanding memory allocations before taking
		 * cmm_lock; cmm_get_info() acquires the lock itself.
		 */
		status = cmm_get_info(hcmm_mgr, &temp_info);
		if (!status) {
			if (temp_info.total_in_use_cnt > 0) {
				/* outstanding allocations */
				status = -EPERM;
			}
		}
	}
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	if (!status) {
		/* UnRegister SM allocator */
		for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
			if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] != NULL) {
				un_register_gppsm_seg
				    (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg]);
				/* Set slot to NULL for future reuse */
				cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = NULL;
			}
		}
	}
	list_for_each_entry_safe(node, tmp, &cmm_mgr_obj->node_free_list,
			link) {
		list_del(&node->link);
		kfree(node);
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	if (!status) {
		/* delete CS & cmm mgr object */
		mutex_destroy(&cmm_mgr_obj->cmm_lock);
		kfree(cmm_mgr_obj);
	}
	return status;
}

/*
 *  ======== cmm_exit ========
 *  Purpose:
 *      Discontinue usage of module; free resources when reference count
 *      reaches 0.
 */
void cmm_exit(void)
{
	DBC_REQUIRE(refs > 0);

	refs--;
}

/*
 *  ======== cmm_free_buf ========
 *  Purpose:
 *      Free the given buffer.
 */
int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa, u32 ul_seg_id)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	int status = -EFAULT;
	struct cmm_mnode *curr, *tmp;
	struct cmm_allocator *allocator;
	struct cmm_attrs *pattrs;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(buf_pa != NULL);

	if (ul_seg_id == 0) {
		pattrs = &cmm_dfltalctattrs;
		ul_seg_id = pattrs->seg_id;
	}
	if (!hcmm_mgr || !(ul_seg_id > 0)) {
		status = -EFAULT;
		return status;
	}

	allocator = get_allocator(cmm_mgr_obj, ul_seg_id);
	if (!allocator)
		return status;

	mutex_lock(&cmm_mgr_obj->cmm_lock);
	list_for_each_entry_safe(curr, tmp, &allocator->in_use_list, link) {
		if (curr->pa == (u32) buf_pa) {
			list_del(&curr->link);
			add_to_free_list(allocator, curr);
			status = 0;
			break;
		}
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);

	return status;
}

/*
 *  ======== cmm_get_handle ========
 *  Purpose:
 *      Return the communication memory manager object for this device.
 *      This is typically called from the client process.
 */
int cmm_get_handle(void *hprocessor, struct cmm_object **ph_cmm_mgr)
{
	int status = 0;
	struct dev_object *hdev_obj;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(ph_cmm_mgr != NULL);
	if (hprocessor != NULL)
		status = proc_get_dev_object(hprocessor, &hdev_obj);
	else
		hdev_obj = dev_get_first();	/* default */

	if (!status)
		status = dev_get_cmm_mgr(hdev_obj, ph_cmm_mgr);

	return status;
}

/*
 *  ======== cmm_get_info ========
 *  Purpose:
 *      Return the current memory utilization information.
 */
int cmm_get_info(struct cmm_object *hcmm_mgr,
			struct cmm_info *cmm_info_obj)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	u32 ul_seg;
	int status = 0;
	struct cmm_allocator *altr;
	struct cmm_mnode *curr;

	DBC_REQUIRE(cmm_info_obj != NULL);

	if (!hcmm_mgr) {
		status = -EFAULT;
		return status;
	}
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	cmm_info_obj->num_gppsm_segs = 0;	/* # of SM segments */
	/* Total # of outstanding alloc */
	cmm_info_obj->total_in_use_cnt = 0;
	/* min block size */
	cmm_info_obj->min_block_size = cmm_mgr_obj->min_block_size;
	/* check SM memory segments */
	for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) {
		/* get the allocator object for this segment id */
		altr = get_allocator(cmm_mgr_obj, ul_seg);
		if (!altr)
			continue;
		cmm_info_obj->num_gppsm_segs++;
		cmm_info_obj->seg_info[ul_seg - 1].seg_base_pa =
			altr->shm_base - altr->dsp_size;
		cmm_info_obj->seg_info[ul_seg - 1].total_seg_size =
			altr->dsp_size + altr->sm_size;
		cmm_info_obj->seg_info[ul_seg - 1].gpp_base_pa =
			altr->shm_base;
		cmm_info_obj->seg_info[ul_seg - 1].gpp_size =
			altr->sm_size;
		cmm_info_obj->seg_info[ul_seg - 1].dsp_base_va =
			altr->dsp_base;
		cmm_info_obj->seg_info[ul_seg - 1].dsp_size =
			altr->dsp_size;
		cmm_info_obj->seg_info[ul_seg - 1].seg_base_va =
			altr->vm_base - altr->dsp_size;
		cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt = 0;

		list_for_each_entry(curr, &altr->in_use_list, link) {
			cmm_info_obj->total_in_use_cnt++;
			cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt++;
		}
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	return status;
}

/*
 *  ======== cmm_init ========
 *  Purpose:
 *      Initializes private state of CMM module.
 */
bool cmm_init(void)
{
	bool ret = true;

	DBC_REQUIRE(refs >= 0);
	if (ret)
		refs++;

	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));

	return ret;
}

/*
 *  ======== cmm_register_gppsm_seg ========
 *  Purpose:
 *      Register a block of SM with the CMM to be used for later GPP SM
 *      allocations.
 */
int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
				  u32 dw_gpp_base_pa, u32 ul_size,
				  u32 dsp_addr_offset, s8 c_factor,
				  u32 dw_dsp_base, u32 ul_dsp_size,
				  u32 *sgmt_id, u32 gpp_base_va)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	struct cmm_allocator *psma = NULL;
	int status = 0;
	struct cmm_mnode *new_node;
	s32 slot_seg;

	DBC_REQUIRE(ul_size > 0);
	DBC_REQUIRE(sgmt_id != NULL);
	DBC_REQUIRE(dw_gpp_base_pa != 0);
	DBC_REQUIRE(gpp_base_va != 0);
	DBC_REQUIRE((c_factor <= CMM_ADDTODSPPA) &&
			(c_factor >= CMM_SUBFROMDSPPA));

	dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x "
			"dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n",
			__func__, dw_gpp_base_pa, ul_size, dsp_addr_offset,
			dw_dsp_base, ul_dsp_size, gpp_base_va);

	if (!hcmm_mgr)
		return -EFAULT;

	/* make sure we have room for another allocator */
	mutex_lock(&cmm_mgr_obj->cmm_lock);

	slot_seg = get_slot(cmm_mgr_obj);
	if (slot_seg < 0) {
		status = -EPERM;
		goto func_end;
	}

	/* Check if input ul_size is big enough to alloc at least one block */
	if (ul_size < cmm_mgr_obj->min_block_size) {
		status = -EINVAL;
		goto func_end;
	}

	/* create, zero, and tag an SM allocator object */
	psma = kzalloc(sizeof(struct cmm_allocator), GFP_KERNEL);
	if (!psma) {
		status = -ENOMEM;
		goto func_end;
	}

	psma->cmm_mgr = hcmm_mgr;	/* ref to parent */
	psma->shm_base = dw_gpp_base_pa;	/* SM Base phys */
	psma->sm_size = ul_size;	/* SM segment size in bytes */
	psma->vm_base = gpp_base_va;
	psma->dsp_phys_addr_offset = dsp_addr_offset;
	psma->c_factor = c_factor;
	psma->dsp_base = dw_dsp_base;
	psma->dsp_size = ul_dsp_size;
	if (psma->vm_base == 0) {
		status = -EPERM;
		goto func_end;
	}
	/* return the actual segment identifier */
	*sgmt_id = (u32) slot_seg + 1;

	INIT_LIST_HEAD(&psma->free_list);
	INIT_LIST_HEAD(&psma->in_use_list);

	/* Get a mem node for this hunk-o-memory */
	new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa,
			psma->vm_base, ul_size);
	/* Place node on the SM allocator's free list */
	if (new_node) {
		list_add_tail(&new_node->link, &psma->free_list);
	} else {
		status = -ENOMEM;
		goto func_end;
	}
	/* make entry */
	cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = psma;

func_end:
	/* Cleanup allocator */
	if (status && psma)
		un_register_gppsm_seg(psma);
	mutex_unlock(&cmm_mgr_obj->cmm_lock);

	return status;
}
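
/*
 * Registration sketch, for illustration only; the addresses, sizes and
 * conversion factor below are hypothetical, not taken from a real board:
 *
 *	u32 seg_id;
 *	int status;
 *
 *	status = cmm_register_gppsm_seg(cmm_mgr, 0x87000000, 0x100000,
 *					0x10000000, CMM_ADDTODSPPA,
 *					0x11000000, 0x0, &seg_id,
 *					gpp_base_va);
 *
 * On success *sgmt_id (seg_id above) is slot + 1, i.e. 1 for the first
 * registered segment, and can then be passed as cmm_attrs.seg_id to
 * cmm_calloc_buf().
 */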

/*
 *  ======== cmm_un_register_gppsm_seg ========
 *  Purpose:
 *      UnRegister GPP SM segments with the CMM.
 */
int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
				     u32 ul_seg_id)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	int status = 0;
	struct cmm_allocator *psma;
	u32 ul_id = ul_seg_id;

	DBC_REQUIRE(ul_seg_id > 0);
	if (!hcmm_mgr)
		return -EFAULT;

	if (ul_seg_id == CMM_ALLSEGMENTS)
		ul_id = 1;

	if ((ul_id <= 0) || (ul_id > CMM_MAXGPPSEGS))
		return -EINVAL;

	/*
	 * FIXME: CMM_MAXGPPSEGS == 1, so why use a while loop? It seems
	 * the ul_seg_id is not needed here; it must always be 1.
	 */
	while (ul_id <= CMM_MAXGPPSEGS) {
		mutex_lock(&cmm_mgr_obj->cmm_lock);
		/* slot = seg_id-1 */
		psma = cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1];
		if (psma != NULL) {
			un_register_gppsm_seg(psma);
			/* Set alctr ptr to NULL for future reuse */
			cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1] = NULL;
		} else if (ul_seg_id != CMM_ALLSEGMENTS) {
			status = -EPERM;
		}
		mutex_unlock(&cmm_mgr_obj->cmm_lock);
		if (ul_seg_id != CMM_ALLSEGMENTS)
			break;

		ul_id++;
	}	/* end while */
	return status;
}

/*
 *  ======== un_register_gppsm_seg ========
 *  Purpose:
 *      UnRegister the SM allocator by freeing all its resources and
 *      nulling cmm mgr table entry.
 *  Note:
 *      This routine is always called within cmm lock crit sect.
 */
static void un_register_gppsm_seg(struct cmm_allocator *psma)
{
	struct cmm_mnode *curr, *tmp;

	DBC_REQUIRE(psma != NULL);

	/* free nodes on free list */
	list_for_each_entry_safe(curr, tmp, &psma->free_list, link) {
		list_del(&curr->link);
		kfree(curr);
	}

	/* free nodes on InUse list */
	list_for_each_entry_safe(curr, tmp, &psma->in_use_list, link) {
		list_del(&curr->link);
		kfree(curr);
	}

	if ((void *)psma->vm_base != NULL)
		MEM_UNMAP_LINEAR_ADDRESS((void *)psma->vm_base);

	/* Free allocator itself */
	kfree(psma);
}

/*
 *  ======== get_slot ========
 *  Purpose:
 *      An available slot # is returned. Returns negative on failure.
 */
static s32 get_slot(struct cmm_object *cmm_mgr_obj)
{
	s32 slot_seg = -1;	/* neg on failure */

	DBC_REQUIRE(cmm_mgr_obj != NULL);
	/* get first available slot in cmm mgr SMSegTab[] */
	for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
		if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] == NULL)
			break;
	}
	if (slot_seg == CMM_MAXGPPSEGS)
		slot_seg = -1;	/* failed */

	return slot_seg;
}

/*
 *  ======== get_node ========
 *  Purpose:
 *      Get a memory node from freelist or create a new one.
 */
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
				  u32 dw_va, u32 ul_size)
{
	struct cmm_mnode *pnode;

	DBC_REQUIRE(cmm_mgr_obj != NULL);
	DBC_REQUIRE(dw_pa != 0);
	DBC_REQUIRE(dw_va != 0);
	DBC_REQUIRE(ul_size != 0);

	/* Check cmm mgr's node freelist */
	if (list_empty(&cmm_mgr_obj->node_free_list)) {
		pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
		if (!pnode)
			return NULL;
	} else {
		/* surely a valid element */
		pnode = list_first_entry(&cmm_mgr_obj->node_free_list,
				struct cmm_mnode, link);
		list_del_init(&pnode->link);
	}

	pnode->pa = dw_pa;
	pnode->va = dw_va;
	pnode->size = ul_size;

	return pnode;
}

/*
 *  ======== delete_node ========
 *  Purpose:
 *      Put a memory node on the cmm nodelist for later use.
 *      Doesn't actually delete the node. Heap thrashing friendly.
 */
static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
{
	DBC_REQUIRE(pnode != NULL);
	list_add_tail(&pnode->link, &cmm_mgr_obj->node_free_list);
}

/*
 *  ======== get_free_block ========
 *  Purpose:
 *      Scan the free block list and return the first block that satisfies
 *      the size.
 */
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
					u32 usize)
{
	struct cmm_mnode *node, *tmp;

	if (!allocator)
		return NULL;

	list_for_each_entry_safe(node, tmp, &allocator->free_list, link) {
		if (usize <= node->size) {
			list_del(&node->link);
			return node;
		}
	}

	return NULL;
}

/*
 *  ======== add_to_free_list ========
 *  Purpose:
 *      Coalesce node into the freelist in ascending size order.
 */
static void add_to_free_list(struct cmm_allocator *allocator,
			     struct cmm_mnode *node)
{
	struct cmm_mnode *curr;

	if (!node) {
		pr_err("%s: failed - node is NULL\n", __func__);
		return;
	}

	list_for_each_entry(curr, &allocator->free_list, link) {
		if (NEXT_PA(curr) == node->pa) {
			curr->size += node->size;
			delete_node(allocator->cmm_mgr, node);
			return;
		}
		if (curr->pa == NEXT_PA(node)) {
			curr->pa = node->pa;
			curr->va = node->va;
			curr->size += node->size;
			delete_node(allocator->cmm_mgr, node);
			return;
		}
	}
	list_for_each_entry(curr, &allocator->free_list, link) {
		if (curr->size >= node->size) {
			list_add_tail(&node->link, &curr->link);
			return;
		}
	}
	list_add_tail(&node->link, &allocator->free_list);
}
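
/*
 * Worked example of the coalescing above (addresses hypothetical): if the
 * free list already holds a block { pa = 0x1000, size = 0x100 } and a block
 * { pa = 0x1100, size = 0x80 } is freed, NEXT_PA(curr) == node->pa matches,
 * so the existing block simply grows to size 0x180 and the freed node's
 * descriptor is recycled via delete_node(). Blocks that do not border an
 * existing free block are inserted in ascending size order instead.
 */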

/*
 *  ======== get_allocator ========
 *  Purpose:
 *      Return the allocator for the given SM Segid.
 *      SegIds:  1,2,3..max.
 */
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
					   u32 ul_seg_id)
{
	DBC_REQUIRE(cmm_mgr_obj != NULL);
	DBC_REQUIRE((ul_seg_id > 0) && (ul_seg_id <= CMM_MAXGPPSEGS));

	return cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1];
}

/*
 *  The CMM_Xlator[xxx] routines below are used by Node and Stream
 *  to perform SM address translation to the client process address space.
 *  A "translator" object is created by a node/stream for each SM seg used.
 */

/*
 *  ======== cmm_xlator_create ========
 *  Purpose:
 *      Create an address translator object.
 */
int cmm_xlator_create(struct cmm_xlatorobject **xlator,
			     struct cmm_object *hcmm_mgr,
			     struct cmm_xlatorattrs *xlator_attrs)
{
	struct cmm_xlator *xlator_object = NULL;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(xlator != NULL);
	DBC_REQUIRE(hcmm_mgr != NULL);

	*xlator = NULL;
	if (xlator_attrs == NULL)
		xlator_attrs = &cmm_dfltxlatorattrs;	/* set defaults */

	xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL);
	if (xlator_object != NULL) {
		xlator_object->cmm_mgr = hcmm_mgr;	/* ref back to CMM */
		/* SM seg_id */
		xlator_object->seg_id = xlator_attrs->seg_id;
	} else {
		status = -ENOMEM;
	}
	if (!status)
		*xlator = (struct cmm_xlatorobject *)xlator_object;

	return status;
}

/*
 *  ======== cmm_xlator_alloc_buf ========
 */
void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *va_buf,
			   u32 pa_size)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	void *pbuf = NULL;
	void *tmp_va_buff;
	struct cmm_attrs attrs;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(xlator != NULL);
	DBC_REQUIRE(xlator_obj->cmm_mgr != NULL);
	DBC_REQUIRE(va_buf != NULL);
	DBC_REQUIRE(pa_size > 0);
	DBC_REQUIRE(xlator_obj->seg_id > 0);

	if (xlator_obj) {
		attrs.seg_id = xlator_obj->seg_id;
		__raw_writel(0, va_buf);
		/* Alloc SM */
		pbuf =
		    cmm_calloc_buf(xlator_obj->cmm_mgr, pa_size, &attrs, NULL);
		if (pbuf) {
			/* convert to translator(node/strm) process Virtual
			 * address */
			tmp_va_buff = cmm_xlator_translate(xlator,
							   pbuf, CMM_PA2VA);
			__raw_writel((u32)tmp_va_buff, va_buf);
		}
	}
	return pbuf;
}
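
/*
 * Usage sketch only (assumes the translator was set up beforehand with
 * cmm_xlator_create() and cmm_xlator_info()): the SM physical address is
 * returned, and the caller's u32 slot pointed to by va_buf is overwritten
 * with the translated process virtual address, e.g.
 *
 *	u32 dsp_buf_va;
 *	void *sm_pa = cmm_xlator_alloc_buf(xlator, &dsp_buf_va, 0x1000);
 */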

/*
 *  ======== cmm_xlator_free_buf ========
 *  Purpose:
 *      Free the given SM buffer and descriptor.
 *      Does not free virtual memory.
 */
int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	int status = -EPERM;
	void *buf_pa = NULL;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(buf_va != NULL);
	DBC_REQUIRE(xlator_obj->seg_id > 0);

	if (xlator_obj) {
		/* convert Va to Pa so we can free it. */
		buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA);
		if (buf_pa) {
			status = cmm_free_buf(xlator_obj->cmm_mgr, buf_pa,
					      xlator_obj->seg_id);
			if (status) {
				/* Uh oh, this shouldn't happen. Descriptor
				 * gone! */
				DBC_ASSERT(false);	/* CMM is leaking mem */
			}
		}
	}
	return status;
}

/*
 *  ======== cmm_xlator_info ========
 *  Purpose:
 *      Set/Get translator info.
 */
int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 **paddr,
			   u32 ul_size, u32 segm_id, bool set_info)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(paddr != NULL);
	DBC_REQUIRE((segm_id > 0) && (segm_id <= CMM_MAXGPPSEGS));

	if (xlator_obj) {
		if (set_info) {
			/* set the translator's virtual address range */
			xlator_obj->virt_base = (u32) *paddr;
			xlator_obj->virt_size = ul_size;
		} else {	/* return virt base address */
			*paddr = (u8 *) xlator_obj->virt_base;
		}
	} else {
		status = -EFAULT;
	}
	return status;
}

/*
 *  ======== cmm_xlator_translate ========
 */
void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
			   enum cmm_xlatetype xtype)
{
	u32 dw_addr_xlate = 0;
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	struct cmm_object *cmm_mgr_obj = NULL;
	struct cmm_allocator *allocator = NULL;
	u32 dw_offset = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(paddr != NULL);
	DBC_REQUIRE((xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA));

	if (!xlator_obj)
		goto loop_cont;

	cmm_mgr_obj = (struct cmm_object *)xlator_obj->cmm_mgr;
	/* get this translator's default SM allocator */
	DBC_ASSERT(xlator_obj->seg_id > 0);
	allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->seg_id - 1];
	if (!allocator)
		goto loop_cont;

	if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_VA2PA) ||
	    (xtype == CMM_PA2VA)) {
		if (xtype == CMM_PA2VA) {
			/* Gpp Va = Va Base + offset */
			dw_offset = (u8 *) paddr - (u8 *) (allocator->shm_base -
							   allocator->
							   dsp_size);
			dw_addr_xlate = xlator_obj->virt_base + dw_offset;
			/* Check if translated Va base is in range */
			if ((dw_addr_xlate < xlator_obj->virt_base) ||
			    (dw_addr_xlate >=
			     (xlator_obj->virt_base +
			      xlator_obj->virt_size))) {
				dw_addr_xlate = 0;	/* bad address */
			}
		} else {
			/* Gpp PA =  Gpp Base + offset */
			dw_offset =
			    (u8 *) paddr - (u8 *) xlator_obj->virt_base;
			dw_addr_xlate =
			    allocator->shm_base - allocator->dsp_size +
			    dw_offset;
		}
	} else {
		dw_addr_xlate = (u32) paddr;
	}
	/* Now convert address to proper target physical address if needed */
	if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_PA2DSPPA)) {
		/* Got Gpp Pa now, convert to DSP Pa */
		dw_addr_xlate =
		    GPPPA2DSPPA((allocator->shm_base - allocator->dsp_size),
				dw_addr_xlate,
				allocator->dsp_phys_addr_offset *
				allocator->c_factor);
	} else if (xtype == CMM_DSPPA2PA) {
		/* Got DSP Pa, convert to GPP Pa */
		dw_addr_xlate =
		    DSPPA2GPPPA(allocator->shm_base - allocator->dsp_size,
				dw_addr_xlate,
				allocator->dsp_phys_addr_offset *
				allocator->c_factor);
	}
loop_cont:
	return (void *)dw_addr_xlate;
}
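
/*
 * Translation sketch, for illustration only (all numbers hypothetical):
 * with shm_base = 0x87000000, dsp_size = 0, virt_base = 0x40000000 and
 * virt_size = 0x100000, cmm_xlator_translate(xlator, (void *)0x40000100,
 * CMM_VA2PA) computes dw_offset = 0x100 and returns 0x87000100; the
 * CMM_PA2VA direction applies the same offset from virt_base and rejects
 * results outside [virt_base, virt_base + virt_size).
 */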