/*
 * rmm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

/*
 * This memory manager provides general heap management and arbitrary
 * alignment for any number of memory segments.
 *
 * Notes:
 *
 * Memory blocks are allocated from the start (lowest address) of the
 * first free memory block large enough to satisfy the request. Alignment
 * requirements are satisfied by "sliding" the block forward until its
 * base satisfies the alignment specification; if this is not possible,
 * the next free block large enough to hold the request is tried.
 *
 * Since alignment can create a new free block - the unused memory formed
 * between the start of the original free block and the start of the
 * allocated block - the memory manager must return this memory to the
 * free list to prevent a memory leak.
 *
 * Overlay memory is managed by reserving it through rmm_alloc and freeing
 * it through rmm_free. The memory manager prevents DSP code/data that is
 * overlaid from being overwritten as long as the memory it occupies has
 * been allocated and not yet freed.
 */
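
/*
 * A minimal usage sketch (illustration only, not part of the driver). The
 * segment base/length and the overlay address below are hypothetical values;
 * the calls shown are the interfaces implemented in this file. rmm_create()
 * and rmm_alloc() return 0 on success; rmm_free() returns true on success.
 * The first rmm_alloc() carves a 16-MAU-aligned block out of segment 0; the
 * second, with reserve == true, pins a caller-chosen overlay address.
 *
 *	struct rmm_segment segs[1] = { { .base = 0x8000, .length = 0x1000 } };
 *	struct rmm_target_obj *target;
 *	u32 dsp_addr;
 *
 *	if (!rmm_create(&target, segs, 1)) {
 *		if (!rmm_alloc(target, 0, 0x100, 16, &dsp_addr, false))
 *			rmm_free(target, 0, dsp_addr, 0x100, false);
 *
 *		dsp_addr = 0x9000;
 *		if (!rmm_alloc(target, 0, 0x80, 0, &dsp_addr, true))
 *			rmm_free(target, 0, dsp_addr, 0x80, true);
 *
 *		rmm_delete(target);
 *	}
 */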

#include <linux/types.h>
#include <linux/list.h>

/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/* ----------------------------------- This */
#include <dspbridge/rmm.h>

/*
 * ======== rmm_header ========
 * This header is used to maintain a list of free memory blocks.
 */
struct rmm_header {
        struct rmm_header *next;        /* form a free memory link list */
        u32 size;                       /* size of the free memory */
        u32 addr;                       /* DSP address of memory block */
};

/*
 * ======== rmm_ovly_sect ========
 * Keeps track of memory occupied by overlay section.
 */
struct rmm_ovly_sect {
        struct list_head list_elem;
        u32 addr;                       /* Start of memory section */
        u32 size;                       /* Length (target MAUs) of section */
        s32 page;                       /* Memory page */
};

/*
 * ======== rmm_target_obj ========
 */
struct rmm_target_obj {
        struct rmm_segment *seg_tab;
        struct rmm_header **free_list;
        u32 num_segs;
        struct list_head ovly_list;     /* List of overlay memory in use */
};

static u32 refs;                        /* module reference count */

static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
                        u32 align, u32 *dsp_address);
static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
                       u32 size);

/*
 * ======== rmm_alloc ========
 */
int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
              u32 align, u32 *dsp_address, bool reserve)
{
        struct rmm_ovly_sect *sect, *prev_sect = NULL;
        struct rmm_ovly_sect *new_sect;
        u32 addr;
        int status = 0;

        DBC_REQUIRE(target);
        DBC_REQUIRE(dsp_address != NULL);
        DBC_REQUIRE(size > 0);
        DBC_REQUIRE(reserve || (target->num_segs > 0));
        DBC_REQUIRE(refs > 0);

        if (!reserve) {
                if (!alloc_block(target, segid, size, align, dsp_address)) {
                        status = -ENOMEM;
                } else {
                        /* Increment the number of allocated blocks in this
                         * segment */
                        target->seg_tab[segid].number++;
                }
                goto func_end;
        }
        /* An overlay section - see if the block is already in use. If not,
         * insert it into the list in ascending address order. */
        addr = *dsp_address;
        /* Find place to insert new list element. List is sorted from
         * smallest to largest address. */
        list_for_each_entry(sect, &target->ovly_list, list_elem) {
                if (addr <= sect->addr) {
                        /* Check for overlap with sect */
                        if ((addr + size > sect->addr) || (prev_sect &&
                                                           (prev_sect->addr +
                                                            prev_sect->size >
                                                            addr))) {
                                status = -ENXIO;
                        }
                        break;
                }
                prev_sect = sect;
        }
        if (!status) {
                /* No overlap - allocate list element for new section. */
                new_sect = kzalloc(sizeof(struct rmm_ovly_sect), GFP_KERNEL);
                if (new_sect == NULL) {
                        status = -ENOMEM;
                } else {
                        new_sect->addr = addr;
                        new_sect->size = size;
                        new_sect->page = segid;
                        if (&sect->list_elem == &target->ovly_list)
                                /* Scanned past the end of the list - put the
                                 * new section at the end */
                                list_add_tail(&new_sect->list_elem,
                                              &target->ovly_list);
                        else
                                /* Put new section just before sect */
                                list_add_tail(&new_sect->list_elem,
                                              &sect->list_elem);
                }
        }
func_end:
        return status;
}
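
/*
 * A worked example of the overlap test above, with hypothetical numbers:
 * suppose overlay sections [0x9000, 0x9080) and [0x9200, 0x9300) are already
 * reserved. Reserving addr = 0x9100, size = 0x180 walks past the first
 * section (0x9100 > 0x9000), stops at the second (0x9100 <= 0x9200), and
 * fails with -ENXIO because addr + size = 0x9280 > 0x9200. Reserving
 * addr = 0x9100, size = 0x100 fits exactly between the two sections and is
 * inserted just before the 0x9200 entry, keeping the list address-sorted.
 */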

/*
 * ======== rmm_create ========
 */
int rmm_create(struct rmm_target_obj **target_obj,
               struct rmm_segment seg_tab[], u32 num_segs)
{
        struct rmm_header *hptr;
        struct rmm_segment *sptr, *tmp;
        struct rmm_target_obj *target;
        s32 i;
        int status = 0;

        DBC_REQUIRE(target_obj != NULL);
        DBC_REQUIRE(num_segs == 0 || seg_tab != NULL);

        /* Allocate DBL target object */
        target = kzalloc(sizeof(struct rmm_target_obj), GFP_KERNEL);
        if (target == NULL) {
                status = -ENOMEM;
                goto func_cont;
        }

        /* Initialize the overlay memory list up front so that rmm_delete()
         * is safe to call on every error path below. */
        INIT_LIST_HEAD(&target->ovly_list);

        target->num_segs = num_segs;
        if (!(num_segs > 0))
                goto func_cont;

        /* Allocate the memory for the free list from the host's memory */
        target->free_list = kzalloc(num_segs * sizeof(struct rmm_header *),
                                    GFP_KERNEL);
        if (target->free_list == NULL) {
                status = -ENOMEM;
                goto func_cont;
        }

        /* Allocate headers for each element on the free list */
        for (i = 0; i < (s32) num_segs; i++) {
                target->free_list[i] =
                    kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
                if (target->free_list[i] == NULL) {
                        status = -ENOMEM;
                        goto func_cont;
                }
        }

        /* Allocate memory for the initial segment table */
        target->seg_tab = kzalloc(num_segs * sizeof(struct rmm_segment),
                                  GFP_KERNEL);
        if (target->seg_tab == NULL) {
                status = -ENOMEM;
                goto func_cont;
        }

        /* Initialize segment table and free list */
        sptr = target->seg_tab;
        for (i = 0, tmp = seg_tab; num_segs > 0; num_segs--, i++) {
                *sptr = *tmp;
                hptr = target->free_list[i];
                hptr->addr = tmp->base;
                hptr->size = tmp->length;
                hptr->next = NULL;
                tmp++;
                sptr++;
        }

func_cont:
        if (!status) {
                *target_obj = target;
        } else {
                *target_obj = NULL;
                if (target)
                        rmm_delete(target);
        }

        DBC_ENSURE((!status && *target_obj)
                   || (status && *target_obj == NULL));

        return status;
}
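
/*
 * Illustration (hypothetical values): with a two-entry segment table such as
 *
 *	seg_tab[0] = { .base = 0x8000, .length = 0x1000 }
 *	seg_tab[1] = { .base = 0xa000, .length = 0x0800 }
 *
 * rmm_create() copies both entries into target->seg_tab and seeds each
 * segment's free list with a single header covering the whole segment:
 * free_list[0] = { addr 0x8000, size 0x1000, next NULL } and
 * free_list[1] = { addr 0xa000, size 0x0800, next NULL }. Allocations then
 * split these headers, and rmm_free()/free_block() coalesce them back.
 */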

/*
 * ======== rmm_delete ========
 */
void rmm_delete(struct rmm_target_obj *target)
{
        struct rmm_ovly_sect *sect, *tmp;
        struct rmm_header *hptr;
        struct rmm_header *next;
        u32 i;

        DBC_REQUIRE(target);

        kfree(target->seg_tab);

        list_for_each_entry_safe(sect, tmp, &target->ovly_list, list_elem) {
                list_del(&sect->list_elem);
                kfree(sect);
        }

        if (target->free_list != NULL) {
                /* Free elements on freelist */
                for (i = 0; i < target->num_segs; i++) {
                        hptr = next = target->free_list[i];
                        while (next) {
                                hptr = next;
                                next = hptr->next;
                                kfree(hptr);
                        }
                }
                kfree(target->free_list);
        }

        kfree(target);
}

/*
 * ======== rmm_exit ========
 */
void rmm_exit(void)
{
        DBC_REQUIRE(refs > 0);

        refs--;

        DBC_ENSURE(refs >= 0);
}

/*
 * ======== rmm_free ========
 */
bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
              bool reserved)
{
        struct rmm_ovly_sect *sect, *tmp;
        bool ret = false;

        DBC_REQUIRE(target);

        DBC_REQUIRE(reserved || segid < target->num_segs);
        DBC_REQUIRE(reserved || (dsp_addr >= target->seg_tab[segid].base &&
                                 (dsp_addr + size) <=
                                 (target->seg_tab[segid].base +
                                  target->seg_tab[segid].length)));

        /*
         * Free or unreserve memory.
         */
        if (!reserved) {
                ret = free_block(target, segid, dsp_addr, size);
                if (ret)
                        target->seg_tab[segid].number--;

        } else {
                /* Unreserve memory */
                list_for_each_entry_safe(sect, tmp, &target->ovly_list,
                                         list_elem) {
                        if (dsp_addr == sect->addr) {
                                DBC_ASSERT(size == sect->size);
                                /* Remove from list */
                                list_del(&sect->list_elem);
                                kfree(sect);
                                return true;
                        }
                }
        }
        return ret;
}

/*
 * ======== rmm_init ========
 */
bool rmm_init(void)
{
        DBC_REQUIRE(refs >= 0);

        refs++;

        return true;
}

/*
 * ======== rmm_stat ========
 */
bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid,
              struct dsp_memstat *mem_stat_buf)
{
        struct rmm_header *head;
        bool ret = false;
        u32 max_free_size = 0;
        u32 total_free_size = 0;
        u32 free_blocks = 0;

        DBC_REQUIRE(mem_stat_buf != NULL);
        DBC_ASSERT(target != NULL);

        if ((u32) segid < target->num_segs) {
                head = target->free_list[segid];

                /* Collect data from free_list */
                while (head != NULL) {
                        max_free_size = max(max_free_size, head->size);
                        total_free_size += head->size;
                        free_blocks++;
                        head = head->next;
                }

                /* ul_size */
                mem_stat_buf->size = target->seg_tab[segid].length;

                /* num_free_blocks */
                mem_stat_buf->num_free_blocks = free_blocks;

                /* total_free_size */
                mem_stat_buf->total_free_size = total_free_size;

                /* len_max_free_block */
                mem_stat_buf->len_max_free_block = max_free_size;

                /* num_alloc_blocks */
                mem_stat_buf->num_alloc_blocks =
                    target->seg_tab[segid].number;

                ret = true;
        }

        return ret;
}

/*
 * ======== alloc_block ========
 * This allocation function allocates memory from the lowest addresses
 * first.
 */
static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
                        u32 align, u32 *dsp_address)
{
        struct rmm_header *head;
        struct rmm_header *prevhead = NULL;
        struct rmm_header *next;
        u32 tmpalign;
        u32 alignbytes;
        u32 hsize;
        u32 allocsize;
        u32 addr;

        alignbytes = (align == 0) ? 1 : align;
        prevhead = NULL;
        head = target->free_list[segid];

        do {
                hsize = head->size;
                next = head->next;

                addr = head->addr;      /* alloc from the bottom */

                /* align allocation */
                tmpalign = (u32) addr % alignbytes;
                if (tmpalign != 0)
                        tmpalign = alignbytes - tmpalign;

                allocsize = size + tmpalign;

                if (hsize >= allocsize) {       /* big enough */
                        if (hsize == allocsize && prevhead != NULL) {
                                prevhead->next = next;
                                kfree(head);
                        } else {
                                head->size = hsize - allocsize;
                                head->addr += allocsize;
                        }

                        /* free up any hole created by alignment */
                        if (tmpalign)
                                free_block(target, segid, addr, tmpalign);

                        *dsp_address = addr + tmpalign;
                        return true;
                }

                prevhead = head;
                head = next;

        } while (head != NULL);

        return false;
}
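
/*
 * A worked example of the alignment arithmetic above (hypothetical numbers):
 * for a free block at addr = 0x8004 and a request of size = 0x40 with
 * align = 0x10, tmpalign = 0x10 - (0x8004 % 0x10) = 0xc, so allocsize = 0x4c.
 * The block is carved starting at 0x8004, the returned address is
 * 0x8004 + 0xc = 0x8010 (16-MAU aligned), and the 0xc-MAU hole at 0x8004 is
 * handed back to free_block() so it is not leaked.
 */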

/*
 * ======== free_block ========
 * TO DO: free_block() allocates memory, which could result in failure.
 * Could allocate an rmm_header in rmm_alloc(), to be kept in a pool.
 * free_block() could use an rmm_header from the pool, freeing as blocks
 * are coalesced.
 */
static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
                       u32 size)
{
        struct rmm_header *head;
        struct rmm_header *thead;
        struct rmm_header *rhead;
        bool ret = true;

        /* Create a memory header to hold the newly free'd block. */
        rhead = kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
        if (rhead == NULL) {
                ret = false;
        } else {
                /* search down the free list to find the right place for addr */
                head = target->free_list[segid];

                if (addr >= head->addr) {
                        while (head->next != NULL && addr > head->next->addr)
                                head = head->next;

                        thead = head->next;

                        head->next = rhead;
                        rhead->next = thead;
                        rhead->addr = addr;
                        rhead->size = size;
                } else {
                        *rhead = *head;
                        head->next = rhead;
                        head->addr = addr;
                        head->size = size;
                        thead = rhead->next;
                }

                /* join with upper block, if possible */
                if (thead != NULL && (rhead->addr + rhead->size) ==
                    thead->addr) {
                        head->next = rhead->next;
                        thead->size = size + thead->size;
                        thead->addr = addr;
                        kfree(rhead);
                        rhead = thead;
                }

                /* join with the lower block, if possible */
                if ((head->addr + head->size) == rhead->addr) {
                        head->next = rhead->next;
                        head->size = head->size + rhead->size;
                        kfree(rhead);
                }
        }

        return ret;
}
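
/*
 * One possible shape of the pre-allocated header pool suggested by the TO DO
 * above (a sketch only, not part of the driver; the hdr_pool field and
 * pool_get() helper are hypothetical). rmm_alloc() would pre-allocate a
 * header while it is still allowed to fail, and free_block() would take one
 * from the pool instead of calling kzalloc(), so freeing could no longer
 * fail:
 *
 *	struct rmm_target_obj {
 *		...
 *		struct rmm_header *hdr_pool;	(spare headers, singly linked)
 *	};
 *
 *	static struct rmm_header *pool_get(struct rmm_target_obj *target)
 *	{
 *		struct rmm_header *h = target->hdr_pool;
 *
 *		if (h)
 *			target->hdr_pool = h->next;
 *		return h;
 *	}
 *
 * Headers released when blocks coalesce would be pushed back onto
 * target->hdr_pool rather than kfree()d.
 */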