/*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
** See Documentation/DMA-mapping.txt for interface definitions.
**
**      (c) Copyright 1999,2000 Hewlett-Packard Company
**      (c) Copyright 2000 Grant Grundler
**      (c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
**      (c) Copyright 2000 John Marvin
**
** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
** (I assume it's from David Mosberger-Tang but there was no Copyright)
**
** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
**
** - ggg
*/

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/init.h>

#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>

#include <asm/io.h>
#include <asm/page.h>	/* get_order */
#include <asm/dma.h>    /* for DMA_CHUNK_SIZE */

#include <linux/proc_fs.h>

static struct proc_dir_entry * proc_gsc_root = NULL;
static int pcxl_proc_info(char *buffer, char **start, off_t offset, int length);
static unsigned long pcxl_used_bytes = 0;
static unsigned long pcxl_used_pages = 0;

extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
static spinlock_t   pcxl_res_lock;
static char    *pcxl_res_map;
static int     pcxl_res_hint;
static int     pcxl_res_size;

#ifdef DEBUG_PCXL_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif


/*
** Dump a hex representation of the resource map.
*/

#ifdef DUMP_RESMAP
static
void dump_resmap(void)
{
	u_long *res_ptr = (unsigned long *)pcxl_res_map;
	u_long i = 0;

	printk("res_map: ");
	for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		printk("%08lx ", *res_ptr);

	printk("\n");
}
#else
static inline void dump_resmap(void) {;}
#endif

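/*
** PA-RISC 1.1 machines have no IOMMU: a DMA address is just the
** physical address, so we claim support for any mask a PCI device
** advertises.
*/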
static int pa11_dma_supported(struct pci_dev *dev, u64 mask)
{
	return 1;
}

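/*
** The map/unmap helpers below walk the kernel page tables
** (pgd -> pmd -> pte) to install or tear down uncached
** (PAGE_KERNEL_UNC) mappings for a DMA buffer, purging the kernel
** TLB entry for each page as they go.
*/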
static inline int map_pte_uncached(pte_t * pte,
		unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		if (!pte_none(*pte))
			printk(KERN_ERR "map_pte_uncached: page already exists\n");
		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
		pdtlb_kernel(orig_vaddr);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		(*paddr_ptr) += PAGE_SIZE;
		pte++;
	} while (vaddr < end);
	return 0;
}

static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc(NULL, pmd, vaddr);
		if (!pte)
			return -ENOMEM;
		if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
			return -ENOMEM;
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
	return 0;
}

static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
		unsigned long paddr)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		pmd_t *pmd;

		pmd = pmd_alloc(NULL, dir, vaddr);
		if (!pmd)
			return -ENOMEM;
		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
			return -ENOMEM;
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
	return 0;
}

static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
		unsigned long size)
{
	pte_t * pte;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, vaddr);
	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t page = *pte;
		pte_clear(pte);
		pdtlb_kernel(orig_vaddr);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		pte++;
		if (pte_none(page) || pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (vaddr < end);
}

static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
		unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, vaddr);
	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
}

static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		unmap_uncached_pmd(dir, vaddr, end - vaddr);
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
}

#define PCXL_SEARCH_LOOP(idx, mask, size)  \
       for(; res_ptr < res_end; ++res_ptr) \
       { \
               if(0 == ((*res_ptr) & mask)) { \
                       *res_ptr |= mask; \
		       idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
		       pcxl_res_hint = idx + (size >> 3); \
                       goto resource_found; \
               } \
       }

#define PCXL_FIND_FREE_MAPPING(idx, mask, size)  { \
       u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
       u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
       PCXL_SEARCH_LOOP(idx, mask, size); \
       res_ptr = (u##size *)&pcxl_res_map[0]; \
       PCXL_SEARCH_LOOP(idx, mask, size); \
}
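
/*
** Worked example: a request for 3 pages builds mask = 0x7 (one bit
** per page of the request) and scans the resource map in u8 chunks;
** the first chunk with those bits clear is claimed by OR-ing the
** mask in.  idx becomes the byte offset of that chunk, and the hint
** is advanced past it so the next search picks up where this one
** left off (the second PCXL_SEARCH_LOOP retries from the start of
** the map).
*/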

unsigned long
pcxl_alloc_range(size_t size)
{
	int res_idx;
	u_long mask, flags;
	unsigned int pages_needed = size >> PAGE_SHIFT;

	ASSERT(pages_needed);
	ASSERT((pages_needed * PAGE_SIZE) < DMA_CHUNK_SIZE);
	ASSERT(pages_needed < (BITS_PER_LONG - PAGE_SHIFT));

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_needed;

	DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n",
		size, pages_needed, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_needed <= 8) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
	} else if(pages_needed <= 16) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
	} else if(pages_needed <= 32) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
	} else {
		panic(__FILE__ ": pcxl_alloc_range() Too many pages to map.\n");
	}

	dump_resmap();
	panic(__FILE__ ": pcxl_alloc_range() out of dma mapping resources\n");

resource_found:

	DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
		res_idx, mask, pcxl_res_hint);

	pcxl_used_pages += pages_needed;
	pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();

	/*
	** return the corresponding vaddr in the pcxl dma map
	*/
	return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
}

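/*
** Each byte of pcxl_res_map covers 8 pages of the DMA map, so byte
** index res_idx corresponds to the virtual address
** pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)).
*/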
#define PCXL_FREE_MAPPINGS(idx, m, size) \
		u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
		ASSERT((*res_ptr & m) == m); \
		*res_ptr &= ~m;

/*
** clear bits in the pcxl resource map
*/
static void
pcxl_free_range(unsigned long vaddr, size_t size)
{
	u_long mask, flags;
	unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
	unsigned int pages_mapped = size >> PAGE_SHIFT;

	ASSERT(pages_mapped);
	ASSERT((pages_mapped * PAGE_SIZE) < DMA_CHUNK_SIZE);
	ASSERT(pages_mapped < (BITS_PER_LONG - PAGE_SHIFT));

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_mapped;

	DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
		res_idx, size, pages_mapped, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_mapped <= 8) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 8);
	} else if(pages_mapped <= 16) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 16);
	} else if(pages_mapped <= 32) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 32);
	} else {
		panic(__FILE__ ": pcxl_free_range() Too many pages to unmap.\n");
	}

	pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
	pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();
}

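/*
** One bit in the resource map tracks one page of the DMA map, hence
** pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3) bytes.
*/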
static int __init
pcxl_dma_init(void)
{
    if (pcxl_dma_start == 0)
	return 0;

    spin_lock_init(&pcxl_res_lock);
    pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
    pcxl_res_hint = 0;
    pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
					    get_order(pcxl_res_size));

    proc_gsc_root = proc_mkdir("gsc", 0);
    create_proc_info_entry("dino", 0, proc_gsc_root, pcxl_proc_info);
    return 0;
}

__initcall(pcxl_dma_init);

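/*
** Consistent ("coherent") allocations: grab physical pages, flush
** them from the data cache, then map them uncached in the pcxl
** area.  The caller gets the uncached virtual address; *dma_handle
** gets the physical address for the device.
*/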
static void * pa11_dma_alloc_consistent (struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
{
	unsigned long vaddr;
	unsigned long paddr;
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	vaddr = pcxl_alloc_range(size);
	paddr = __get_free_pages(GFP_ATOMIC, order);
	flush_kernel_dcache_range(paddr, size);
	paddr = __pa(paddr);
	map_uncached_pages(vaddr, size, paddr);
	*dma_handle = (dma_addr_t) paddr;

#if 0
/* This probably isn't needed to support EISA cards.
** ISA cards will certainly only support 24-bit DMA addressing.
** Not clear if we can, want, or need to support ISA.
*/
	if (!hwdev || hwdev->dma_mask != 0xffffffff)
		gfp |= GFP_DMA;
#endif
	return (void *)vaddr;
}

static void pa11_dma_free_consistent (struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	unmap_uncached_pages((unsigned long)vaddr, size);
	pcxl_free_range((unsigned long)vaddr, size);
	free_pages((unsigned long)__va(dma_handle), order);
}

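/*
** Streaming mappings: the DMA address is simply the physical
** address, so mapping reduces to flushing the CPU data cache so the
** device sees current memory contents.
*/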
static dma_addr_t pa11_dma_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
{
	if (direction == PCI_DMA_NONE) {
		printk(KERN_ERR "pa11_dma_map_single(PCI_DMA_NONE) called by %p\n", __builtin_return_address(0));
		BUG();
	}

	flush_kernel_dcache_range((unsigned long) addr, size);
	return virt_to_phys(addr);
}

static void pa11_dma_unmap_single(struct pci_dev *dev, dma_addr_t dma_handle, size_t size, int direction)
{
	if (direction == PCI_DMA_NONE) {
		printk(KERN_ERR "pa11_dma_unmap_single(PCI_DMA_NONE) called by %p\n", __builtin_return_address(0));
		BUG();
	}

	if (direction == PCI_DMA_TODEVICE)
	    return;

	/*
	 * For PCI_DMA_FROMDEVICE this flush is not necessary for the
	 * simple map/unmap case. However, it IS necessary if
	 * pci_dma_sync_single has been called and the buffer reused.
	 */

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
	return;
}

static int pa11_dma_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direction)
{
	int i;

	if (direction == PCI_DMA_NONE)
	    BUG();

	for (i = 0; i < nents; i++, sglist++ ) {
		sg_dma_address(sglist) = (dma_addr_t) virt_to_phys(sg_virt_addr(sglist));
		sg_dma_len(sglist) = sglist->length;
		flush_kernel_dcache_range((unsigned long)sg_virt_addr(sglist),
				sglist->length);
	}
	return nents;
}

static void pa11_dma_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direction)
{
	int i;

	if (direction == PCI_DMA_NONE)
	    BUG();

	if (direction == PCI_DMA_TODEVICE)
	    return;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for (i = 0; i < nents; i++, sglist++ )
		flush_kernel_dcache_range((unsigned long) sg_virt_addr(sglist), sglist->length);
	return;
}

static void pa11_dma_sync_single(struct pci_dev *dev, dma_addr_t dma_handle, size_t size, int direction)
{
	if (direction == PCI_DMA_NONE)
	    BUG();

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
}

static void pa11_dma_sync_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direction)
{
	int i;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for (i = 0; i < nents; i++, sglist++ )
		flush_kernel_dcache_range((unsigned long) sg_virt_addr(sglist), sglist->length);
}

struct pci_dma_ops pcxl_dma_ops = {
	pa11_dma_supported,			/* dma_support */
	pa11_dma_alloc_consistent,
	pa11_dma_free_consistent,
	pa11_dma_map_single,			/* map_single */
	pa11_dma_unmap_single,			/* unmap_single */
	pa11_dma_map_sg,			/* map_sg */
	pa11_dma_unmap_sg,			/* unmap_sg */
	pa11_dma_sync_single,			/* dma_sync_single */
	pa11_dma_sync_sg			/* dma_sync_sg */
};
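
/*
** Illustrative use (hypothetical driver snippet, not part of this
** file): drivers reach these ops through the generic PCI DMA API,
** e.g.:
**
**	dma_addr_t handle;
**	void *cpu_addr = pci_alloc_consistent(pdev, buf_len, &handle);
**	if (cpu_addr) {
**		... hand 'handle' to the device, use cpu_addr from the CPU ...
**		pci_free_consistent(pdev, buf_len, cpu_addr, handle);
**	}
*/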

static void *fail_alloc_consistent(struct pci_dev *hwdev, size_t size,
		dma_addr_t *dma_handle)
{
	return NULL;
}

static void fail_free_consistent(struct pci_dev *dev, size_t size,
		void *vaddr, dma_addr_t iova)
{
	return;
}

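/*
** For machines without the pcxl DMA map, consistent allocations
** simply fail; drivers there must fall back to the streaming
** (map/unmap) interfaces above.
*/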
struct pci_dma_ops pcx_dma_ops = {
	pa11_dma_supported,			/* dma_support */
	fail_alloc_consistent,
	fail_free_consistent,
	pa11_dma_map_single,			/* map_single */
	pa11_dma_unmap_single,			/* unmap_single */
	pa11_dma_map_sg,			/* map_sg */
	pa11_dma_unmap_sg,			/* unmap_sg */
	pa11_dma_sync_single,			/* dma_sync_single */
	pa11_dma_sync_sg			/* dma_sync_sg */
};


static int pcxl_proc_info(char *buf, char **start, off_t offset, int len)
{
	u_long i = 0;
	unsigned long *res_ptr = (u_long *)pcxl_res_map;
	unsigned long total_pages = pcxl_res_size << 3;        /* 8 bits per byte */

	sprintf(buf, "\nDMA Mapping Area size    : %d bytes (%d pages)\n",
		PCXL_DMA_MAP_SIZE,
		(pcxl_res_size << 3) ); /* 1 bit per page */

	sprintf(buf, "%sResource bitmap : %d bytes (%d pages)\n",
		buf, pcxl_res_size, pcxl_res_size << 3);   /* 8 bits per byte */

	strcat(buf,  "     	  total:    free:    used:   % used:\n");
	sprintf(buf, "%sblocks  %8d %8ld %8ld %8ld%%\n", buf, pcxl_res_size,
		pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
		(pcxl_used_bytes * 100) / pcxl_res_size);

	sprintf(buf, "%spages   %8ld %8ld %8ld %8ld%%\n", buf, total_pages,
		total_pages - pcxl_used_pages, pcxl_used_pages,
		(pcxl_used_pages * 100 / total_pages));

	strcat(buf, "\nResource bitmap:");

	for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
		    strcat(buf,"\n   ");
		sprintf(buf, "%s %08lx", buf, *res_ptr);
	}
	strcat(buf, "\n");
	return strlen(buf);
}