/*
 * SRAM allocator for Blackfin on-chip memory
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <asm/blackfin.h>
#include <asm/mem_map.h>
#include "blackfin_sram.h"

/* bookkeeping node for a piece of L1 or L2 on-chip SRAM */
struct sram_piece {
	void *paddr;			/* start address of the piece */
	int size;			/* size in bytes */
	pid_t pid;			/* owning PID, or 0 if free */
	struct sram_piece *next;
};
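
/*
 * Each SRAM region is managed as two singly-linked lists of sram_piece
 * nodes: a free list kept sorted by ascending address and a used list
 * kept sorted by descending address.  The free_..._head/used_..._head
 * variables below are placeholder head nodes; the real pieces hang off
 * their ->next pointers.
 */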

static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);

#if L1_DATA_A_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
#endif

#if L1_DATA_B_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
#endif

#if L1_DATA_A_LENGTH || L1_DATA_B_LENGTH
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
#endif

#if L1_CODE_LENGTH != 0
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
#endif

#if L2_LENGTH != 0
static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
static struct sram_piece free_l2_sram_head, used_l2_sram_head;
#endif

/* slab cache that all sram_piece nodes are allocated from */
static struct kmem_cache *sram_piece_cache;

/* L1 Scratchpad SRAM initialization function */
static void __init l1sram_init(void)
{
	unsigned int cpu;
	unsigned long reserve;

#ifdef CONFIG_SMP
	reserve = 0;
#else
	/* on UP, space for a struct l1_scratch_task_info is reserved
	 * at the start of scratchpad
	 */
	reserve = sizeof(struct l1_scratch_task_info);
#endif

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_ssram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_ssram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize Scratchpad data SRAM\n");
			return;
		}

		per_cpu(free_l1_ssram_head, cpu).next->paddr =
			(void *)get_l1_scratch_start_cpu(cpu) + reserve;
		per_cpu(free_l1_ssram_head, cpu).next->size =
			L1_SCRATCH_LENGTH - reserve;
		per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
		per_cpu(free_l1_ssram_head, cpu).next->next = NULL;

		per_cpu(used_l1_ssram_head, cpu).next = NULL;

		/* initialize the per-CPU lock */
		spin_lock_init(&per_cpu(l1sram_lock, cpu));
		printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
			L1_SCRATCH_LENGTH >> 10);
	}
}

static void __init l1_data_sram_init(void)
{
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	unsigned int cpu;
#endif
#if L1_DATA_A_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_A_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data A SRAM\n");
			return;
		}

		/* skip the portion already used by the kernel image */
		per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
			(void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->size =
			L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
			L1_DATA_A_LENGTH >> 10,
			per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
	}
#endif
#if L1_DATA_B_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_B_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data B SRAM\n");
			return;
		}

		per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
			(void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->size =
			L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
			L1_DATA_B_LENGTH >> 10,
			per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
	}
#endif

#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	/* both banks share one per-CPU lock */
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
#endif
}

static void __init l1_inst_sram_init(void)
{
#if L1_CODE_LENGTH != 0
	unsigned int cpu;
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_inst_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n");
			return;
		}

		/* skip the kernel code already placed in L1 */
		per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
			(void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->size =
			L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_inst_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
			L1_CODE_LENGTH >> 10,
			per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);

		/* initialize the per-CPU lock */
		spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
	}
#endif
}

static void __init l2_sram_init(void)
{
#if L2_LENGTH != 0
	free_l2_sram_head.next =
		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
	if (!free_l2_sram_head.next) {
		printk(KERN_INFO "Failed to initialize L2 SRAM\n");
		return;
	}

	/* skip the kernel text/data already placed in L2 */
	free_l2_sram_head.next->paddr =
		(void *)L2_START + (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->size =
		L2_LENGTH - (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->pid = 0;
	free_l2_sram_head.next->next = NULL;

	used_l2_sram_head.next = NULL;

	printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n",
		L2_LENGTH >> 10,
		free_l2_sram_head.next->size >> 10);

	/* initialize the lock */
	spin_lock_init(&l2_sram_lock);
#endif
}

static int __init bfin_sram_init(void)
{
	sram_piece_cache = kmem_cache_create("sram_piece_cache",
				sizeof(struct sram_piece),
				0, SLAB_PANIC, NULL);

	l1sram_init();
	l1_data_sram_init();
	l1_inst_sram_init();
	l2_sram_init();

	return 0;
}
pure_initcall(bfin_sram_init);
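
/*
 * Note: pure_initcall runs at the earliest regular initcall level, so
 * the SRAM free lists are set up before any driver initcalls that
 * might want to grab on-chip memory.
 */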

/* SRAM allocation: first fit, with block splitting */
static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (size <= 0 || !pfree_head || !pused_head)
		return NULL;

	/* round the request up to a 4-byte boundary */
	size = (size + 3) & ~3;

	pslot = pfree_head->next;
	plast = pfree_head;

	/* first fit: find the first free piece that is large enough */
	while (pslot != NULL && size > pslot->size) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return NULL;

	if (pslot->size == size) {
		/* exact fit: move the whole piece to the used list */
		plast->next = pslot->next;
		pavail = pslot;
	} else {
		/* split: carve the request off the front of the piece */
		/* use atomic so our L1 allocator can be used atomically */
		pavail = kmem_cache_alloc(sram_piece_cache, GFP_ATOMIC);

		if (!pavail)
			return NULL;

		pavail->paddr = pslot->paddr;
		pavail->size = size;
		pslot->paddr += size;
		pslot->size -= size;
	}

	pavail->pid = current->pid;

	pslot = pused_head->next;
	plast = pused_head;

	/* insert the new piece into the used list, which is kept
	 * sorted by descending address
	 */
	while (pslot != NULL && pavail->paddr < pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	pavail->next = pslot;
	plast->next = pavail;

	return pavail->paddr;
}
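
/*
 * For instance, with a single 16 KB free piece at ff800000, a 1 KB
 * request returns ff800000 and shrinks the free piece to 15 KB at
 * ff800400; the next 1 KB request returns ff800400, and so on.  An
 * exact-size match reuses the existing node instead of splitting.
 */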

/* Allocate the largest available block.  */
static void *_sram_alloc_max(struct sram_piece *pfree_head,
				struct sram_piece *pused_head,
				unsigned long *psize)
{
	struct sram_piece *pslot, *pmax;

	if (!pfree_head || !pused_head)
		return NULL;

	pmax = pslot = pfree_head->next;

	/* find the largest free piece */
	while (pslot != NULL) {
		if (pslot->size > pmax->size)
			pmax = pslot;
		pslot = pslot->next;
	}

	if (!pmax)
		return NULL;

	*psize = pmax->size;

	return _sram_alloc(*psize, pfree_head, pused_head);
}

/* SRAM free: return a piece to the free list, coalescing neighbors */
static int _sram_free(const void *addr,
			struct sram_piece *pfree_head,
			struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (!pfree_head || !pused_head)
		return -1;

	/* find the piece in the used list */
	pslot = pused_head->next;
	plast = pused_head;

	while (pslot != NULL && pslot->paddr != addr) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return -1;

	plast->next = pslot->next;
	pavail = pslot;
	pavail->pid = 0;

	/* find the insertion point in the address-sorted free list */
	pslot = pfree_head->next;
	plast = pfree_head;

	while (pslot != NULL && addr > pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	/* merge with the preceding free piece if they are contiguous */
	if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) {
		plast->size += pavail->size;
		kmem_cache_free(sram_piece_cache, pavail);
	} else {
		pavail->next = plast->next;
		plast->next = pavail;
		plast = pavail;
	}

	/* merge with the following free piece if they are contiguous */
	if (pslot && plast->paddr + plast->size == pslot->paddr) {
		plast->size += pslot->size;
		plast->next = pslot->next;
		kmem_cache_free(sram_piece_cache, pslot);
	}

	return 0;
}
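
/*
 * Because both merge checks run on every free, returning blocks in any
 * order eventually reassembles contiguous memory into a single free
 * piece; e.g. freeing A, then C, then B (three adjacent allocations)
 * leaves one coalesced piece.
 */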

/* free a block by looking up which SRAM region its address falls in */
int sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	if (addr >= (void *)get_l1_code_start()
		 && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
		return l1_inst_sram_free(addr);
	else
#endif
#if L1_DATA_A_LENGTH != 0
	if (addr >= (void *)get_l1_data_a_start()
		 && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
		return l1_data_A_sram_free(addr);
	else
#endif
#if L1_DATA_B_LENGTH != 0
	if (addr >= (void *)get_l1_data_b_start()
		 && addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
		return l1_data_B_sram_free(addr);
	else
#endif
#if L2_LENGTH != 0
	if (addr >= (void *)L2_START
		 && addr < (void *)(L2_START + L2_LENGTH))
		return l2_sram_free(addr);
	else
#endif
		return -1;
}
EXPORT_SYMBOL(sram_free);
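
/*
 * Minimal usage sketch (the buffer size and use are illustrative):
 * pair any of the region allocators below with sram_free(), which
 * finds the owning region by address.
 *
 *	void *buf = l1_data_sram_alloc(256);
 *	if (buf) {
 *		... time-critical work using buf ...
 *		sram_free(buf);
 *	}
 */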

void *l1_data_A_sram_alloc(size_t size)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* protect the free/used lists */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n",
		 (unsigned long)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_alloc);

int l1_data_A_sram_free(const void *addr)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* protect the free/used lists */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_free);

void *l1_data_B_sram_alloc(size_t size)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* protect the free/used lists */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n",
		 (unsigned long)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_alloc);

int l1_data_B_sram_free(const void *addr)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* protect the free/used lists */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_free);

/* try bank A first, then fall back to bank B */
void *l1_data_sram_alloc(size_t size)
{
	void *addr = l1_data_A_sram_alloc(size);

	if (!addr)
		addr = l1_data_B_sram_alloc(size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_alloc);

void *l1_data_sram_zalloc(size_t size)
{
	void *addr = l1_data_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_zalloc);

int l1_data_sram_free(const void *addr)
{
	int ret;
	ret = l1_data_A_sram_free(addr);
	if (ret == -1)
		ret = l1_data_B_sram_free(addr);
	return ret;
}
EXPORT_SYMBOL(l1_data_sram_free);

void *l1_inst_sram_alloc(size_t size)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* protect the free/used lists */
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n",
		 (unsigned long)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_alloc);

int l1_inst_sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* protect the free/used lists */
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_free);

/* L1 Scratchpad memory allocation function */
void *l1sram_alloc(size_t size)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* protect the free/used lists */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return addr;
}

/* allocate the largest available block of L1 Scratchpad memory */
void *l1sram_alloc_max(size_t *psize)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* protect the free/used lists */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu), psize);

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return addr;
}

/* L1 Scratchpad memory free function */
int l1sram_free(const void *addr)
{
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* protect the free/used lists */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return ret;
}

void *l2_sram_alloc(size_t size)
{
#if L2_LENGTH != 0
	unsigned long flags;
	void *addr;

	/* protect the free/used lists */
	spin_lock_irqsave(&l2_sram_lock, flags);

	addr = _sram_alloc(size, &free_l2_sram_head,
			&used_l2_sram_head);

	spin_unlock_irqrestore(&l2_sram_lock, flags);

	pr_debug("Allocated address in l2_sram_alloc is 0x%lx+0x%lx\n",
		 (unsigned long)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l2_sram_alloc);

void *l2_sram_zalloc(size_t size)
{
	void *addr = l2_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l2_sram_zalloc);

int l2_sram_free(const void *addr)
{
#if L2_LENGTH != 0
	unsigned long flags;
	int ret;

	/* protect the free/used lists */
	spin_lock_irqsave(&l2_sram_lock, flags);

	ret = _sram_free(addr, &free_l2_sram_head,
			&used_l2_sram_head);

	spin_unlock_irqrestore(&l2_sram_lock, flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l2_sram_free);

int sram_free_with_lsl(const void *addr)
{
	struct sram_list_struct *lsl, **tmp;
	struct mm_struct *mm = current->mm;
	int ret = -1;

	/* unlink the tracking node for this address, then free the block */
	for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
		if ((*tmp)->addr == addr) {
			lsl = *tmp;
			ret = sram_free(addr);
			*tmp = lsl->next;
			kfree(lsl);
			break;
		}

	return ret;
}
EXPORT_SYMBOL(sram_free_with_lsl);

/* Allocate memory and keep it in an L1 SRAM list (lsl) so that the
 * resources are tracked.  These are designed for userspace, so that
 * when a process exits, we can safely reap its resources.
 */
void *sram_alloc_with_lsl(size_t size, unsigned long flags)
{
	void *addr = NULL;
	struct sram_list_struct *lsl = NULL;
	struct mm_struct *mm = current->mm;

	lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL);
	if (!lsl)
		return NULL;

	/* try each requested region in turn until one succeeds */
	if (flags & L1_INST_SRAM)
		addr = l1_inst_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_A_SRAM))
		addr = l1_data_A_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_B_SRAM))
		addr = l1_data_B_sram_alloc(size);

	if (addr == NULL && (flags & L2_SRAM))
		addr = l2_sram_alloc(size);

	if (addr == NULL) {
		kfree(lsl);
		return NULL;
	}
	lsl->addr = addr;
	lsl->length = size;
	lsl->next = mm->context.sram_list;
	mm->context.sram_list = lsl;
	return addr;
}
EXPORT_SYMBOL(sram_alloc_with_lsl);
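
/*
 * Minimal usage sketch (the size and flag choice are illustrative):
 * the flags may be OR-ed so the call falls back from one region to the
 * next, and the allocation is reaped automatically when the owning
 * process exits.
 *
 *	void *p = sram_alloc_with_lsl(512, L1_DATA_A_SRAM | L2_SRAM);
 *	if (p)
 *		sram_free_with_lsl(p);
 */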

#ifdef CONFIG_PROC_FS
/* Once we get a real allocator, we'll throw all of this away.
 * Until then, we need some sort of visibility into the L1 alloc.
 */
/* Each line of output must be the same length.  Currently that is
 * 44 bytes (including the newline).
 */
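/* An output line resembles the following (exact widths come from the
 * format strings in _sram_proc_read(); the values are illustrative):
 *
 *	ff800000-ff800400       1024    42 ALLOCATED
 */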
static int _sram_proc_read(char *buf, int *len, int count, const char *desc,
		struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot;

	if (!pfree_head || !pused_head)
		return -1;

	*len += sprintf(&buf[*len], "--- SRAM %-14s Size   PID State     \n", desc);

	/* dump the used list, then the free list */
	pslot = pused_head->next;

	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "ALLOCATED");

		pslot = pslot->next;
	}

	pslot = pfree_head->next;

	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "FREE");

		pslot = pslot->next;
	}

	return 0;
}

static int sram_proc_read(char *buf, char **start, off_t offset, int count,
		int *eof, void *data)
{
	int len = 0;
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		if (_sram_proc_read(buf, &len, count, "Scratchpad",
			&per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)))
			goto not_done;
#if L1_DATA_A_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data A",
			&per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu)))
			goto not_done;
#endif
#if L1_DATA_B_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data B",
			&per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu)))
			goto not_done;
#endif
#if L1_CODE_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Instruction",
			&per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu)))
			goto not_done;
#endif
	}
#if L2_LENGTH != 0
	if (_sram_proc_read(buf, &len, count, "L2", &free_l2_sram_head,
		&used_l2_sram_head))
		goto not_done;
#endif
	*eof = 1;
 not_done:
	return len;
}

static int __init sram_proc_init(void)
{
	struct proc_dir_entry *ptr;

	ptr = create_proc_entry("sram", S_IFREG | S_IRUGO, NULL);
	if (!ptr) {
		printk(KERN_WARNING "unable to create /proc/sram\n");
		return -1;
	}
	ptr->read_proc = sram_proc_read;
	return 0;
}
late_initcall(sram_proc_init);
#endif