/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

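/*
 * Translate a device-visible DMA address back into the CPU virtual
 * address of the underlying buffer, going through the platform's
 * dma-to-physical conversion first.
 */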
static inline unsigned long dma_addr_to_virt(struct device *dev,
	dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr);

	return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

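/*
 * The R10000 and R12000 execute loads speculatively and can pull lines
 * into the caches behind the program's back, so on a non-coherent
 * platform these CPUs need cache maintenance even after DMA from the
 * device has completed.
 */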
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
	       current_cpu_type() == CPU_R12000);
}

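/*
 * Pick the GFP zone matching the device's coherent DMA mask: masks
 * below 32 bits get ZONE_DMA, masks below 64 bits get ZONE_DMA32 where
 * those zones exist.  Zone modifiers passed in by the caller are
 * discarded.
 */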
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	gfp_t dma_flag;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
	if (dev == NULL)
		dma_flag = __GFP_DMA;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		dma_flag = __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA;
	else
#endif
		dma_flag = 0;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp | dma_flag;
}

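/*
 * Allocate a buffer whose caches the caller manages by hand: the
 * returned pointer is an ordinary cached mapping, and drivers are
 * expected to bracket device accesses with dma_cache_sync().
 */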
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}
EXPORT_SYMBOL(dma_alloc_noncoherent);

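/*
 * Allocate a coherent buffer.  On non-coherent platforms the fresh
 * pages are written back and invalidated, and the returned pointer is
 * rewritten to the uncached (UNCAC/KSEG1) alias so that CPU accesses
 * bypass the caches entirely.
 */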
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_noncoherent);

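/*
 * Free a coherent buffer.  The uncached alias handed out by
 * mips_dma_alloc_coherent() must be converted back to its cached
 * address before the pages can go back to the page allocator.
 */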
static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, order);
}

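/*
 * Perform the cache maintenance a DMA transfer needs: write back dirty
 * lines before the device reads memory, invalidate stale lines before
 * the CPU reads what the device wrote, or both for bidirectional
 * transfers.
 */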
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

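/*
 * On R10000/R12000, speculative execution may have refilled cache
 * lines over the buffer while the device owned it, so sync once more
 * before the mapping is torn down.
 */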
static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dev, dma_addr), size,
			   direction);

	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

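/*
 * Map a scatterlist for DMA: on non-coherent platforms sync each
 * element's cache lines, then fill in its device-visible address.
 */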
static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) sg_virt(sg);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr, sg->length, direction);
		sg->dma_address = plat_map_dma_mem(dev,
						   (void *)addr, sg->length);
	}

	return nents;
}

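/*
 * Map a single page for DMA, syncing the covered range first on
 * non-coherent platforms.
 */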
static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	unsigned long addr;

	addr = (unsigned long) page_address(page) + offset;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, (void *)addr, size);
}

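/*
 * Unmap a scatterlist.  For transfers where the device may have
 * written to memory (anything but DMA_TO_DEVICE) the caches must be
 * synced before the CPU looks at the data.
 */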
static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	int nhwentries, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	unsigned long addr;
	int i;

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			addr = (unsigned long) sg_virt(sg);
			if (addr)
				__dma_sync(addr, sg->length, direction);
		}
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}

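/*
 * Hand a mapped region back to the CPU.  Only the speculatively
 * executing R10000/R12000 can have pulled stale lines in while the
 * device owned the buffer, so cache work is limited to those CPUs.
 */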
static void mips_dma_sync_single_for_cpu(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr, size, direction);
	}
}

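/*
 * Hand a region back to the device: any non-coherent platform must do
 * the direction-appropriate cache maintenance first.
 */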
static void mips_dma_sync_single_for_device(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr, size, direction);
	}
}

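/*
 * Scatterlist variants of the single-buffer syncs above, applying the
 * same coherency rules to every element.
 */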
static void mips_dma_sync_sg_for_cpu(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
	}
}

static void mips_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
	}
}

int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return plat_dma_mapping_error(dev, dma_addr);
}

int mips_dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync((unsigned long)vaddr, size, direction);
}
EXPORT_SYMBOL(dma_cache_sync);

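/*
 * Default dma_map_ops, used unless a platform points mips_dma_map_ops
 * at its own table.
 */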
static struct dma_map_ops mips_default_dma_map_ops = {
	.alloc_coherent = mips_dma_alloc_coherent,
	.free_coherent = mips_dma_free_coherent,
	.map_page = mips_dma_map_page,
	.unmap_page = mips_dma_unmap_page,
	.map_sg = mips_dma_map_sg,
	.unmap_sg = mips_dma_unmap_sg,
	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
	.sync_single_for_device = mips_dma_sync_single_for_device,
	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
	.sync_sg_for_device = mips_dma_sync_sg_for_device,
	.mapping_error = mips_dma_mapping_error,
	.dma_supported = mips_dma_supported
};

struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init mips_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(mips_dma_init);