/*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
** See Documentation/PCI/PCI-DMA-mapping.txt for interface definitions.
**
**      (c) Copyright 1999,2000 Hewlett-Packard Company
**      (c) Copyright 2000 Grant Grundler
**	(c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
**      (c) Copyright 2000 John Marvin
**
** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
** (I assume it's from David Mosberger-Tang but there was no Copyright)
**
** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
**
** - ggg
*/
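
/*
** Two sets of DMA operations are provided here:
**
**  - pcxl_dma_ops: for CPUs that can map pages uncached.  Consistent
**    memory is carved out of a dedicated virtual area starting at
**    pcxl_dma_start and tracked in a small resource bitmap.
**
**  - pcx_dma_ops: for CPUs with no way to create uncached mappings.
**    Consistent allocations fail outright; drivers must fall back to
**    non-coherent memory plus explicit sync operations.
*/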

#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/dma.h>    /* for DMA_CHUNK_SIZE */
#include <asm/io.h>
#include <asm/page.h>	/* get_order */
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>	/* for purge_tlb_*() macros */

static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
static unsigned long pcxl_used_bytes __read_mostly = 0;
static unsigned long pcxl_used_pages __read_mostly = 0;

extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
static spinlock_t   pcxl_res_lock;
static char    *pcxl_res_map;
static int     pcxl_res_hint;
static int     pcxl_res_size;

#ifdef DEBUG_PCXL_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif


/*
** Dump a hex representation of the resource map.
*/

#ifdef DUMP_RESMAP
static
void dump_resmap(void)
{
	u_long *res_ptr = (unsigned long *)pcxl_res_map;
	u_long i = 0;

	printk("res_map: ");
	for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		printk("%08lx ", *res_ptr);

	printk("\n");
}
#else
static inline void dump_resmap(void) {;}
#endif

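/*
** PA 1.1 has no DMA address translation hardware, so devices DMA
** straight to physical memory.  Claim support for any mask; there is
** no IOMMU whose reach could be exceeded.
*/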
static int pa11_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

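/*
** The map_*_uncached() helpers walk (and, where needed, allocate) the
** kernel page tables covering a range of the pcxl mapping area and
** install PAGE_KERNEL_UNC ptes, so the CPU accesses the range without
** the data cache interfering with DMA.  The TLB entry for each page
** is purged as its pte is written.
*/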
static inline int map_pte_uncached(pte_t * pte,
		unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;

		if (!pte_none(*pte))
			printk(KERN_ERR "map_pte_uncached: page already exists\n");
		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
		purge_tlb_start(flags);
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		(*paddr_ptr) += PAGE_SIZE;
		pte++;
	} while (vaddr < end);
	return 0;
}

static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc_kernel(pmd, vaddr);
		if (!pte)
			return -ENOMEM;
		if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
			return -ENOMEM;
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
	return 0;
}

static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
		unsigned long paddr)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		pmd_t *pmd;

		pmd = pmd_alloc(NULL, dir, vaddr);
		if (!pmd)
			return -ENOMEM;
		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
			return -ENOMEM;
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
	return 0;
}

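/*
** The unmap_uncached_*() helpers mirror the mapping path: clear the
** ptes covering the range and purge the matching TLB entries.  The
** page tables themselves are left in place for reuse.
*/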
static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
		unsigned long size)
{
	pte_t * pte;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset_map(pmd, vaddr);
	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;
		pte_t page = *pte;

		pte_clear(&init_mm, vaddr, pte);
		purge_tlb_start(flags);
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		pte++;
		if (pte_none(page) || pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (vaddr < end);
}

static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
		unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, vaddr);
	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
}

static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		unmap_uncached_pmd(dir, vaddr, end - vaddr);
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
}

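/*
** pcxl_res_map is a bitmap with one bit per page of the DMA mapping
** area.  Allocations are found by scanning the map a word at a time:
** u8 words for requests of up to 8 pages, u16 for up to 16, u32 for
** up to 32.  A free run must fit entirely inside one word, so every
** allocation starts on an 8/16/32-page boundary.  The scan starts at
** pcxl_res_hint and wraps to the beginning of the map once.
*/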
#define PCXL_SEARCH_LOOP(idx, mask, size)  \
       for(; res_ptr < res_end; ++res_ptr) \
       { \
               if(0 == ((*res_ptr) & mask)) { \
                       *res_ptr |= mask; \
		       idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
		       pcxl_res_hint = idx + (size >> 3); \
                       goto resource_found; \
               } \
       }

#define PCXL_FIND_FREE_MAPPING(idx, mask, size)  { \
       u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
       u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
       PCXL_SEARCH_LOOP(idx, mask, size); \
       res_ptr = (u##size *)&pcxl_res_map[0]; \
       PCXL_SEARCH_LOOP(idx, mask, size); \
}
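
/*
** Worked example: a 3-page request gives pages_needed = 3 and
** mask = 0x7 (three low bits set).  PCXL_FIND_FREE_MAPPING(idx, 0x7, 8)
** then scans one byte at a time for a byte whose low three bits are
** all clear, sets those bits, and records the byte offset in idx.
*/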

unsigned long
pcxl_alloc_range(size_t size)
{
	int res_idx;
	u_long mask, flags;
	unsigned int pages_needed = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_needed;

	DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n",
		size, pages_needed, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_needed <= 8) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
	} else if(pages_needed <= 16) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
	} else if(pages_needed <= 32) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_alloc_range() Too many pages to map.\n",
		      __FILE__);
	}

	dump_resmap();
	panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
	      __FILE__);

resource_found:

	DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
		res_idx, mask, pcxl_res_hint);

	pcxl_used_pages += pages_needed;
	pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();

	/*
	** Return the vaddr in the pcxl dma map that corresponds to
	** res_idx: each byte of the resource map covers 8 pages, hence
	** the shift by (PAGE_SHIFT + 3).
	*/
	return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
}

#define PCXL_FREE_MAPPINGS(idx, m, size) \
		u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
		/* BUG_ON((*res_ptr & m) != m); */ \
		*res_ptr &= ~m;

/*
** clear bits in the pcxl resource map
*/
static void
pcxl_free_range(unsigned long vaddr, size_t size)
{
	u_long mask, flags;
	unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
	unsigned int pages_mapped = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_mapped;

	DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
		res_idx, size, pages_mapped, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_mapped <= 8) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 8);
	} else if(pages_mapped <= 16) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 16);
	} else if(pages_mapped <= 32) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_free_range() Too many pages to unmap.\n",
		      __FILE__);
	}

	pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
	pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();
}

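/*
** /proc/gsc/pcxl_dma: report the size of the DMA mapping area and how
** much of it is in use, both in "blocks" (bytes of the resource map,
** i.e. 8-page units) and in pages.
*/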
static int proc_pcxl_dma_show(struct seq_file *m, void *v)
{
#if 0
	u_long i = 0;
	unsigned long *res_ptr = (u_long *)pcxl_res_map;
#endif
	unsigned long total_pages = pcxl_res_size << 3;   /* 8 bits per byte */

	seq_printf(m, "\nDMA Mapping Area size    : %d bytes (%ld pages)\n",
		PCXL_DMA_MAP_SIZE, total_pages);

	seq_printf(m, "Resource bitmap : %d bytes\n", pcxl_res_size);

	seq_puts(m,  "     	  total:    free:    used:   % used:\n");
	seq_printf(m, "blocks  %8d %8ld %8ld %8ld%%\n", pcxl_res_size,
		pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
		(pcxl_used_bytes * 100) / pcxl_res_size);

	seq_printf(m, "pages   %8ld %8ld %8ld %8ld%%\n", total_pages,
		total_pages - pcxl_used_pages, pcxl_used_pages,
		(pcxl_used_pages * 100 / total_pages));

#if 0
	seq_puts(m, "\nResource bitmap:");

	for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
		    seq_puts(m,"\n   ");
		seq_printf(m, " %08lx", *res_ptr);
	}
#endif
	seq_putc(m, '\n');
	return 0;
}

static int proc_pcxl_dma_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_pcxl_dma_show, NULL);
}

static const struct file_operations proc_pcxl_dma_ops = {
	.owner		= THIS_MODULE,
	.open		= proc_pcxl_dma_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

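/*
** Size the resource bitmap at one bit per page of PCXL_DMA_MAP_SIZE,
** allocate and clear it, and register the /proc/gsc/pcxl_dma entry.
** Nothing to do if no pcxl mapping area was set up (pcxl_dma_start
** is 0).
*/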
static int __init
pcxl_dma_init(void)
{
	if (pcxl_dma_start == 0)
		return 0;

	spin_lock_init(&pcxl_res_lock);
	pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
	pcxl_res_hint = 0;
	pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
					    get_order(pcxl_res_size));
	memset(pcxl_res_map, 0, pcxl_res_size);
	proc_gsc_root = proc_mkdir("gsc", NULL);
	if (!proc_gsc_root)
    		printk(KERN_WARNING
			"pcxl_dma_init: Unable to create gsc /proc dir entry\n");
	else {
		struct proc_dir_entry* ent;
		ent = proc_create("pcxl_dma", 0, proc_gsc_root,
				  &proc_pcxl_dma_ops);
		if (!ent)
			printk(KERN_WARNING
				"pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
	}
	return 0;
}

__initcall(pcxl_dma_init);

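/*
** Allocate DMA-consistent memory in four steps: reserve a virtual
** range from the pcxl resource map, allocate physical pages, flush
** them from the data cache, and map them uncached at the reserved
** vaddr.  The device gets the physical address; the CPU uses the
** uncached virtual address.
*/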
static void *pa11_dma_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
{
	unsigned long vaddr;
	unsigned long paddr;
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	vaddr = pcxl_alloc_range(size);
	paddr = __get_free_pages(flag, order);
	flush_kernel_dcache_range(paddr, size);
	paddr = __pa(paddr);
	map_uncached_pages(vaddr, size, paddr);
	*dma_handle = (dma_addr_t) paddr;

#if 0
/* This probably isn't needed to support EISA cards.
** ISA cards will certainly only support 24-bit DMA addressing.
** Not clear if we can, want, or need to support ISA.
*/
	if (!dev || *dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;
#endif
	return (void *)vaddr;
}

static void pa11_dma_free_consistent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	unmap_uncached_pages((unsigned long)vaddr, size);
	pcxl_free_range((unsigned long)vaddr, size);
	free_pages((unsigned long)__va(dma_handle), order);
}

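/*
** Streaming mappings on a non-coherent CPU: flush the buffer from the
** data cache before handing its physical address to the device.
*/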
static dma_addr_t pa11_dma_map_single(struct device *dev, void *addr, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) addr, size);
	return virt_to_phys(addr);
}

static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (direction == DMA_TO_DEVICE)
	    return;

	/*
	 * For PCI_DMA_FROMDEVICE this flush is not necessary for the
	 * simple map/unmap case. However, it IS necessary if
	 * pci_dma_sync_single_* has been called and the buffer reused.
	 */

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
	return;
}

static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sglist++ ) {
		unsigned long vaddr = sg_virt_addr(sglist);
		sg_dma_address(sglist) = (dma_addr_t) virt_to_phys(vaddr);
		sg_dma_len(sglist) = sglist->length;
		flush_kernel_dcache_range(vaddr, sglist->length);
	}
	return nents;
}

static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	if (direction == DMA_TO_DEVICE)
	    return;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for (i = 0; i < nents; i++, sglist++ )
		flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
	return;
}

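/*
** All four sync operations come down to the same dcache flush: on
** PA-RISC a flush both writes back dirty lines (what the device needs
** before it reads) and invalidates them (what the CPU needs before it
** reads), so one primitive covers both directions.
*/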
static void pa11_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}

static void pa11_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}

static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for (i = 0; i < nents; i++, sglist++ )
		flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
}

static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for (i = 0; i < nents; i++, sglist++ )
		flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
}

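/*
** DMA operations for CPUs that can map pages uncached and thus offer
** real consistent memory (per the header comment, the PA7100LC and
** PA7300LC family).
*/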
struct hppa_dma_ops pcxl_dma_ops = {
	.dma_supported =	pa11_dma_supported,
	.alloc_consistent =	pa11_dma_alloc_consistent,
	.alloc_noncoherent =	pa11_dma_alloc_consistent,
	.free_consistent =	pa11_dma_free_consistent,
	.map_single =		pa11_dma_map_single,
	.unmap_single =		pa11_dma_unmap_single,
	.map_sg =		pa11_dma_map_sg,
	.unmap_sg =		pa11_dma_unmap_sg,
	.dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
	.dma_sync_single_for_device = pa11_dma_sync_single_for_device,
	.dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
	.dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,
};

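/*
** PCX CPUs have no uncached mappings to offer, so consistent
** allocations simply fail; drivers get non-coherent memory and must
** rely on the explicit sync operations above.
*/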
static void *fail_alloc_consistent(struct device *dev, size_t size,
				   dma_addr_t *dma_handle, gfp_t flag)
{
	return NULL;
}

static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t flag)
{
	void *addr;

	addr = (void *)__get_free_pages(flag, get_order(size));
	if (addr)
		*dma_handle = (dma_addr_t)virt_to_phys(addr);

	return addr;
}

static void pa11_dma_free_noncoherent(struct device *dev, size_t size,
					void *vaddr, dma_addr_t iova)
{
	free_pages((unsigned long)vaddr, get_order(size));
	return;
}

struct hppa_dma_ops pcx_dma_ops = {
	.dma_supported =	pa11_dma_supported,
	.alloc_consistent =	fail_alloc_consistent,
	.alloc_noncoherent =	pa11_dma_alloc_noncoherent,
	.free_consistent =	pa11_dma_free_noncoherent,
	.map_single =		pa11_dma_map_single,
	.unmap_single =		pa11_dma_unmap_single,
	.map_sg =		pa11_dma_map_sg,
	.unmap_sg =		pa11_dma_unmap_sg,
	.dma_sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
	.dma_sync_single_for_device =	pa11_dma_sync_single_for_device,
	.dma_sync_sg_for_cpu =		pa11_dma_sync_sg_for_cpu,
	.dma_sync_sg_for_device =	pa11_dma_sync_sg_for_device,
};
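
/*
** Usage sketch (hypothetical driver code, not part of this file; it
** assumes the generic PCI DMA wrappers dispatch through whichever
** hppa_dma_ops structure the platform selected):
**
**	dma_addr_t bus;
**	void *cpu = pci_alloc_consistent(pdev, size, &bus);
**	if (cpu) {
**		... hand "bus" to the device, touch the buffer via "cpu" ...
**		pci_free_consistent(pdev, size, cpu, bus);
**	}
*/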