#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-attrs.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>

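/*
 * struct dma_map_ops - architecture/IOMMU specific DMA mapping hooks.
 *
 * Each architecture (or IOMMU backend) supplies an instance of this
 * table and the generic dma_* helpers dispatch through it: alloc/free
 * manage coherent buffers, map_page/map_sg set up streaming mappings,
 * and the sync_* hooks hand buffer ownership back and forth between
 * the CPU and the device on non-coherent hardware.
 */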
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
		       dma_addr_t *dma_handle, gfp_t gfp,
		       struct dma_attrs *attrs);
	void (*free)(struct device *dev, size_t size,
		     void *vaddr, dma_addr_t dma_handle,
		     struct dma_attrs *attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
		    void *, dma_addr_t, size_t, struct dma_attrs *attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       struct dma_attrs *attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   struct dma_attrs *attrs);
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      struct dma_attrs *attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 struct dma_attrs *attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL
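
/*
 * Example: DMA_BIT_MASK(32) evaluates to 0x00000000ffffffffULL.  The
 * 64-bit case is special-cased because shifting a 64-bit value by the
 * full width of the type would be undefined behaviour.
 */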

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
#include <asm-generic/dma-mapping-broken.h>
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif
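
/*
 * Illustrative probe-time usage (not part of this header): drivers
 * commonly try a wide mask first and fall back to 32 bits.
 * dma_set_mask() itself comes from the architecture's
 * <asm/dma-mapping.h>.
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
 *	    dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
 *		if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
 *		    dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
 *			return -EIO;
 *	}
 */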

extern u64 dma_get_required_mask(struct device *dev);

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	return dev->dma_parms ? dev->dma_parms->max_segment_size : 65536;
}

static inline int dma_set_max_seg_size(struct device *dev,
				       unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	} else
		return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	return dev->dma_parms ?
		dev->dma_parms->segment_boundary_mask : 0xffffffff;
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	} else
		return -EIO;
}

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle, flag);
	if (ret)
		memset(ret, 0, size);
	return ret;
}
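
/*
 * Illustrative usage (field names are hypothetical): allocate a zeroed,
 * coherent descriptor ring and hand its bus address to the hardware.
 *
 *	ring->desc = dma_zalloc_coherent(dev, ring_bytes,
 *					 &ring->desc_dma, GFP_KERNEL);
 *	if (!ring->desc)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, ring_bytes, ring->desc, ring->desc_dma);
 */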

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

/* flags for the coherent memory api */
#define	DMA_MEMORY_MAP			0x01
#define DMA_MEMORY_IO			0x02
#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
#define DMA_MEMORY_EXCLUSIVE		0x08

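/*
 * Illustrative usage (only effective on architectures providing
 * ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY; elsewhere the stubs below
 * apply): carve out a device-local region so that later
 * dma_alloc_coherent() calls for this device are satisfied from it.
 *
 *	if (!(dma_declare_coherent_memory(dev, bus_addr, device_addr, size,
 *					  DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE)
 *	      & DMA_MEMORY_MAP))
 *		return -ENXIO;
 */
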
#ifndef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
static inline int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_handle);
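
/*
 * The dmam_* variants are device-managed (devres) wrappers: anything
 * still allocated through them is released automatically when the
 * driver detaches, so error paths and remove() need no explicit free.
 * Illustrative usage:
 *
 *	buf = dmam_alloc_coherent(&pdev->dev, size, &buf_dma, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 */
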
#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
static inline int dmam_declare_coherent_memory(struct device *dev,
				dma_addr_t bus_addr, dma_addr_t device_addr,
				size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */

#ifndef CONFIG_HAVE_DMA_ATTRS
struct dma_attrs;

#define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \
	dma_map_single(dev, cpu_addr, size, dir)

#define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \
	dma_unmap_single(dev, dma_addr, size, dir)

#define dma_map_sg_attrs(dev, sgl, nents, dir, attrs) \
	dma_map_sg(dev, sgl, nents, dir)

#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \
	dma_unmap_sg(dev, sgl, nents, dir)

#endif /* CONFIG_HAVE_DMA_ATTRS */
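
/*
 * With CONFIG_HAVE_DMA_ATTRS the real attribute plumbing comes from
 * <linux/dma-attrs.h>; an illustrative caller might look like:
 *
 *	DEFINE_DMA_ATTRS(attrs);
 *	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 *	handle = dma_map_single_attrs(dev, cpu_addr, size,
 *				      DMA_TO_DEVICE, &attrs);
 *
 * Without it, the wrappers above simply discard the attrs argument.
 */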

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)           (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)             (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
#endif
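
/*
 * Illustrative usage of the unmap-state helpers in a driver-private
 * structure (names are hypothetical):
 *
 *	struct ring_entry {
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(entry, mapping, dma_handle);
 *	dma_unmap_len_set(entry, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(entry, mapping),
 *			 dma_unmap_len(entry, len), DMA_TO_DEVICE);
 *
 * Without CONFIG_NEED_DMA_MAP_STATE the fields, and hence the
 * bookkeeping, compile away to nothing.
 */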

#endif /* _LINUX_DMA_MAPPING_H */