/*
 *  linux/arch/arm/mm/consistent.c
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Dynamic DMA mapping support.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/init.h>

#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>

/*
 * This allocates an area of cache-coherent memory (one or more pages)
 * and returns both the virtual and a "dma" address to that space.  It
 * is not clear whether this could be called from an interrupt context
 * or not.  For now, we expressly forbid it, especially as some of the
 * stuff we do here is not interrupt context safe.
 *
 * We should allow this function to be called from interrupt context.
 * However, we call ioremap, which needs to fiddle around with various
 * things (like the vmlist_lock, and allocating page tables).  These
 * things aren't interrupt safe (yet).
 *
 * Note that this does *not* zero the allocated area!
 */
void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
{
        struct page *page, *end, *free;
        unsigned long order;
        void *ret;

        /* FIXME */
        if (in_interrupt())
                BUG();

        size = PAGE_ALIGN(size);
        order = get_order(size);

        page = alloc_pages(gfp, order);
        if (!page)
                goto no_page;

        *dma_handle = page_to_bus(page);
        ret = __ioremap(page_to_pfn(page) << PAGE_SHIFT, size, 0);
        if (!ret)
                goto no_remap;

#if 0 /* ioremap_does_flush_cache_all */
        {
                void *virt = page_address(page);

                /*
                 * we need to ensure that there are no cachelines in use,
                 * or worse dirty in this area.  Really, we don't need to
                 * do this since __ioremap does a flush_cache_all() anyway.
                 * --rmk
                 */
                invalidate_dcache_range(virt, virt + size);
        }
#endif

        /*
         * free wasted pages.  We skip the first page since we know
         * that it will have count = 1 and won't require freeing.
         * We also mark the pages in use as reserved so that
         * remap_page_range works.
         */
        free = page + (size >> PAGE_SHIFT);
        end = page + (1 << order);

        for (; page < end; page++) {
                set_page_count(page, 1);
                if (page >= free)
                        __free_page(page);
                else
                        SetPageReserved(page);
        }
        return ret;

no_remap:
        __free_pages(page, order);
no_page:
        return NULL;
}

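/*
 * Usage sketch, not part of this file: a driver would typically call
 * consistent_alloc() once at initialisation time, hand the returned
 * dma_handle to its device, and access the buffer via the returned
 * virtual address.  The caller must clear the buffer itself, since
 * consistent_alloc() does not zero it.  The names my_dev_init, ring,
 * ring_dma and RING_BYTES below are hypothetical.
 */
#if 0
#define RING_BYTES      (4 * PAGE_SIZE)

static void *ring;
static dma_addr_t ring_dma;

static int __init my_dev_init(void)
{
        ring = consistent_alloc(GFP_KERNEL, RING_BYTES, &ring_dma);
        if (!ring)
                return -ENOMEM;
        memset(ring, 0, RING_BYTES);    /* not zeroed on our behalf */
        /* ... program ring_dma into the device's descriptor base ... */
        return 0;
}
#endif
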
void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *handle)
{
        int gfp = GFP_KERNEL;

#if defined(CONFIG_PCI) || defined(CONFIG_SA1111)
        if ((hwdev) == NULL || dev_is_sa1111(hwdev) ||
            (hwdev)->dma_mask != 0xffffffff)
#endif
                gfp |= GFP_DMA;

        return consistent_alloc(gfp, size, handle);
}

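/*
 * Sketch of the matching free-side PCI wrapper, shown only for
 * illustration: in this version of the tree pci_free_consistent() may
 * be provided elsewhere (e.g. by a header), but it would simply forward
 * to consistent_free() along these lines.
 */
#if 0
void pci_free_consistent(struct pci_dev *hwdev, size_t size,
                         void *vaddr, dma_addr_t handle)
{
        consistent_free(vaddr, size, handle);
}
#endif
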
/*
 * free an area allocated by the above mapping.  We expressly forbid
 * calling this from interrupt context.
 */
void consistent_free(void *vaddr, size_t size, dma_addr_t handle)
{
        struct page *page, *end;

        if (in_interrupt())
                BUG();

        /*
         * More messing around with the MM internals.  This is
         * sick, but then so is remap_page_range().
         */
        size = PAGE_ALIGN(size);
        page = virt_to_page(bus_to_virt(handle));
        end = page + (size >> PAGE_SHIFT);

        for (; page < end; page++)
                ClearPageReserved(page);

        __iounmap(vaddr);
}

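/*
 * Illustrative pairing, not part of this file: the size and dma handle
 * passed here must be the same values used for (and returned by) the
 * matching consistent_alloc() call, because the pages are looked up via
 * the bus address rather than the remapped virtual address.  The names
 * below reuse the hypothetical ring/ring_dma/RING_BYTES from the sketch
 * above.
 */
#if 0
static void __exit my_dev_exit(void)
{
        consistent_free(ring, RING_BYTES, ring_dma);
}
#endif
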
/*
 * make an area consistent.
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
        unsigned long start = (unsigned long)vaddr;
        unsigned long end = start + size;

        switch (direction) {
        case PCI_DMA_NONE:
                BUG();
        case PCI_DMA_FROMDEVICE:        /* invalidate only */
                invalidate_dcache_range(start, end);
                break;
        case PCI_DMA_TODEVICE:          /* writeback only */
                clean_dcache_range(start, end);
                break;
        case PCI_DMA_BIDIRECTIONAL:     /* writeback and invalidate */
                flush_dcache_range(start, end);
                break;
        }
}
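
/*
 * Usage sketch, illustrative only: consistent_sync() is the cache
 * maintenance step for streaming (non-coherent) buffers.  A buffer the
 * CPU fills and the device reads must be cleaned before the transfer
 * starts; a buffer the device writes must be invalidated before the CPU
 * reads the result.  "buf" and "len" below are hypothetical.
 */
#if 0
        /* CPU -> device: write dirty cache lines back to memory first */
        consistent_sync(buf, len, PCI_DMA_TODEVICE);
        /* ... kick off the DMA transfer ... */

        /* device -> CPU: discard stale cache lines before reading */
        consistent_sync(buf, len, PCI_DMA_FROMDEVICE);
#endif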