/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#undef DEBUG

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include <asm/pgalloc.h>
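
/*
 * Coherent allocations: grab a physically contiguous block, split it
 * into individual pages, give back the tail pages beyond the requested
 * size, then remap the rest into the vmalloc area with the CPU caches
 * disabled. The bus address handed back is simply the physical address
 * of the first page, as there is no IOMMU here.
 */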
void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t flag)
{
	struct page *page, **map;
	pgprot_t pgprot;
	void *addr;
	int i, order;

	pr_debug("dma_alloc_coherent: %zu, %x\n", size, flag);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(flag, order);
	if (!page)
		return NULL;

	*handle = page_to_phys(page);
	map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA);
	if (!map) {
		__free_pages(page, order);
		return NULL;
	}
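	/*
	 * Split the high-order block into order-0 pages so the unused
	 * tail pages can be returned to the allocator, and record each
	 * page that stays in 'map' for the vmap() below.
	 */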
	split_page(page, order);

	order = 1 << order;
	size >>= PAGE_SHIFT;
	map[0] = page;
	for (i = 1; i < size; i++)
		map[i] = page + i;
	for (; i < order; i++)
		__free_page(page + i);
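	/*
	 * Build an uncached mapping: the '040/'060 use the serialized
	 * non-cachable page mode, the '030 only has a cache-inhibit bit.
	 */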
	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
	if (CPU_IS_040_OR_060)
		pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
	else
		pgprot_val(pgprot) |= _PAGE_NOCACHE030;
	addr = vmap(map, size, VM_MAP, pgprot);
	kfree(map);

	return addr;
}
EXPORT_SYMBOL(dma_alloc_coherent);
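
/*
 * Typical caller pattern, as a sketch only (the device pointer, size
 * and error path here are illustrative, not taken from this file):
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 */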

void dma_free_coherent(struct device *dev, size_t size,
		void *addr, dma_addr_t handle)
{
	pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
	vfree(addr);
}
EXPORT_SYMBOL(dma_free_coherent);
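
/*
 * For device access the CPU caches must be made coherent by hand:
 * writes to the device need the dirty lines pushed out first
 * (cache_push), reads from the device need the stale lines
 * invalidated (cache_clear).
 */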
void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		cache_push(handle, size);
		break;
	case DMA_FROM_DEVICE:
		cache_clear(handle, size);
		break;
	default:
		if (printk_ratelimit())
			pr_err("dma_sync_single_for_device: unsupported dir %u\n",
			       dir);
		break;
	}
}
EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; sg++, i++)
		dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
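
/*
 * Streaming mappings: with no IOMMU the bus address is just the
 * physical address, so "mapping" only means making the caches
 * coherent for the transfer.
 */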
dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
		enum dma_data_direction dir)
{
	dma_addr_t handle = virt_to_bus(addr);

	dma_sync_single_for_device(dev, handle, size, dir);
	return handle;
}
EXPORT_SYMBOL(dma_map_single);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	dma_addr_t handle = page_to_phys(page) + offset;

	dma_sync_single_for_device(dev, handle, size, dir);
	return handle;
}
EXPORT_SYMBOL(dma_map_page);
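
/*
 * Mapping a scatterlist cannot fail here: each entry's bus address is
 * its physical address, and the caches are synced entry by entry, so
 * the full nents is always returned.
 */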
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; sg++, i++) {
		sg->dma_address = sg_phys(sg);
		dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
	}
	return nents;
}
EXPORT_SYMBOL(dma_map_sg);