/*
 * Dynamic DMA mapping support.
 *
 * This implementation is for IA-64 platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 *
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/io.h>
#include <asm/pci.h>
#include <asm/dma.h>

#include <linux/init.h>
#include <linux/bootmem.h>

#define ALIGN(val, align) ((unsigned long)	\
	(((unsigned long) (val) + ((align) - 1)) & ~((align) - 1)))

#define OFFSET(val,align) ((unsigned long)	\
	( (val) & ( (align) - 1)))
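
/*
 * For illustration (not from the original source): with an alignment of
 * 0x800 (the 2 KB IO TLB slab size defined below),
 *
 *	ALIGN(0x1234, 0x800)  == 0x1800		(round up to the next slab)
 *	OFFSET(0x1234, 0x800) == 0x234		(offset within a slab)
 */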

#define SG_ENT_VIRT_ADDRESS(sg)	((sg)->address ? (sg)->address			\
				 : page_address((sg)->page) + (sg)->offset)
#define SG_ENT_PHYS_ADDRESS(SG)	virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))

/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2.  What is the appropriate value?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128

/*
 * log of the size of each IO TLB slab.  The number of slabs is command line controllable.
 */
#define IO_TLB_SHIFT 11
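
/*
 * For illustration (not in the original source): with IO_TLB_SHIFT == 11
 * each slab is 1 << 11 == 2 KB, a segment of IO_TLB_SEGSIZE slabs is
 * 128 * 2 KB == 256 KB (the largest single bounce mapping this pool can
 * hand out), and the default pool of 32768 slabs below occupies 64 MB.
 */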

/*
 * Used to do a quick range check in swiotlb_unmap_single and swiotlb_sync_single, to see
 * if the memory was in fact allocated by this API.
 */
char *io_tlb_start, *io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and io_tlb_end.
 * This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs = 32768;

/*
 * This is a free list describing the number of free entries available from each index.
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * We need to save away the original address corresponding to a mapped entry for the sync
 * operations.
 */
static unsigned char **io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls.
 */
static spinlock_t io_tlb_lock = SPIN_LOCK_UNLOCKED;

static int __init
setup_io_tlb_npages (char *str)
{
	io_tlb_nslabs = simple_strtoul(str, NULL, 0) << (PAGE_SHIFT - IO_TLB_SHIFT);

	/* avoid tail segment of size < IO_TLB_SEGSIZE */
	io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);

	return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
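
/*
 * For illustration (not in the original source): booting with
 * "swiotlb=16384" asks for 16384 pages of bounce space.  Assuming 16 KB
 * pages (PAGE_SHIFT == 14), this becomes 16384 << (14 - 11) == 131072
 * slabs of 2 KB each, i.e. a 256 MB pool, already a multiple of
 * IO_TLB_SEGSIZE so the ALIGN() above leaves it unchanged.
 */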


/*
 * Statically reserve bounce buffer space and initialize bounce buffer data structures for
 * the software IO TLB used to implement the PCI DMA API.
 */
void
swiotlb_init (void)
{
	int i;

	/*
	 * Get IO TLB memory from the low pages
	 */
	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
	if (!io_tlb_start)
		BUG();
	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));

	printk(KERN_INFO "Placing software IO TLB between 0x%p - 0x%p\n",
	       (void *) io_tlb_start, (void *) io_tlb_end);
}
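
/*
 * For illustration (not in the original source): after swiotlb_init() the
 * free list reads
 *
 *	io_tlb_list[] = { 128, 127, ..., 2, 1, 128, 127, ..., 2, 1, ... }
 *
 * i.e. entry i holds the number of free slabs from i up to the end of its
 * 128-slab segment, which is what map_single() consults below.
 */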

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single (struct pci_dev *hwdev, char *buffer, size_t size, int direction)
{
	unsigned long flags;
	char *dma_addr;
	unsigned int nslots, stride, index, wrap;
	int i;

	/*
	 * For mappings greater than a page, we limit the stride (and hence alignment)
	 * to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > (1 << PAGE_SHIFT))
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;

	if (!nslots)
		BUG();

	/*
	 * Find a suitable number of IO TLB entries that will fit this request and
	 * allocate a buffer from the IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		wrap = index = ALIGN(io_tlb_index, stride);

		if (index >= io_tlb_nslabs)
			wrap = index = 0;

		do {
			/*
			 * If we find a slot that indicates at least 'nslots' contiguous
			 * buffers are free, we allocate the buffers from that slot and
			 * mark the entries as '0', indicating they are unavailable.
			 */
			if (io_tlb_list[index] >= nslots) {
				int count = 0;

				for (i = index; i < index + nslots; i++)
					io_tlb_list[i] = 0;
				for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1)
				     && io_tlb_list[i]; i--)
					io_tlb_list[i] = ++count;
				dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

				/*
				 * Update the indices to avoid searching in the next round.
				 */
				io_tlb_index = ((index + nslots) < io_tlb_nslabs
						? (index + nslots) : 0);

				goto found;
			}
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
		} while (index != wrap);

		/*
		 * XXX What is a suitable recovery mechanism here?  We cannot
		 * sleep because we are called from within interrupt context!
		 */
		panic("map_single: could not allocate software IO TLB (%ld bytes)", size);
	}
  found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.  This is
	 * needed when we sync the memory.  Then we sync the buffer if needed.
	 */
	io_tlb_orig_addr[index] = buffer;
	if (direction == PCI_DMA_TODEVICE || direction == PCI_DMA_BIDIRECTIONAL)
		memcpy(dma_addr, buffer, size);

	return dma_addr;
}
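
/*
 * For illustration (not in the original source), assuming a 16 KB page size:
 * a 5 KB request needs nslots = 3 and, being smaller than a page, searches
 * with stride 1.  If io_tlb_list[10] >= 3, slabs 10..12 are claimed (set to
 * 0), the free counts of the free slabs just below are rewritten to 1, 2, ...
 * down to the segment boundary or the first busy slab, and io_tlb_index is
 * advanced to 13 so the next search starts after the new allocation.
 */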

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
unmap_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction)
{
	unsigned long flags;
	int i, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	char *buffer = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if ((direction == PCI_DMA_FROMDEVICE) || (direction == PCI_DMA_BIDIRECTIONAL))
		/*
		 * bounce... copy the data back into the original buffer and delete the
		 * bounce buffer.
		 */
		memcpy(buffer, dma_addr, size);

	/*
	 * Return the buffer to the free list by setting the corresponding entries to
	 * indicate the number of contiguous entries available.  While returning the
	 * entries to the free list, we merge the entries with slots below and above the
	 * pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		int count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			     io_tlb_list[index + nslots] : 0);
		/*
		 * Step 1: return the slots to the free list, merging the slots with
		 * succeeding slots
		 */
		for (i = index + nslots - 1; i >= index; i--)
			io_tlb_list[i] = ++count;
		/*
		 * Step 2: merge the returned slots with the preceding slots, if
		 * available (non-zero)
		 */
		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) &&
		     io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}
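
/*
 * For illustration (not in the original source): suppose slabs 10..12 are
 * being freed and io_tlb_list[13] == 4 (four free slabs follow).  count
 * starts at 4, slabs 12, 11, 10 are rewritten to 5, 6, 7, and the run is
 * then extended into any free slabs below (9, 8, ...) until a busy slab or
 * a 128-slab segment boundary is hit, so the freed hole is fully merged.
 */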

static void
sync_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction)
{
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	char *buffer = io_tlb_orig_addr[index];

	/*
	 * bounce... copy the data back into/from the original buffer
	 * XXX How do you handle PCI_DMA_BIDIRECTIONAL here?
	 */
	if (direction == PCI_DMA_FROMDEVICE)
		memcpy(buffer, dma_addr, size);
	else if (direction == PCI_DMA_TODEVICE)
		memcpy(dma_addr, buffer, size);
	else
		BUG();
}

void *
swiotlb_alloc_consistent (struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
{
	unsigned long pci_addr;
	int gfp = GFP_ATOMIC;
	void *ret;

	/*
	 * Alloc_consistent() is defined to return memory < 4GB, no matter what the DMA
	 * mask says.
	 */
	gfp |= GFP_DMA;	/* XXX fix me: should change this to GFP_32BIT or ZONE_32BIT */
	ret = (void *)__get_free_pages(gfp, get_order(size));
	if (!ret)
		return NULL;

	memset(ret, 0, size);
	pci_addr = virt_to_phys(ret);
	if (hwdev && (pci_addr & ~hwdev->dma_mask) != 0)
		panic("swiotlb_alloc_consistent: allocated memory is out of range for PCI device");
	*dma_handle = pci_addr;
	return ret;
}

void
swiotlb_free_consistent (struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}
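
#if 0
/*
 * Illustrative sketch only, not part of the original file and not built:
 * a hypothetical driver allocating (and later freeing) a small consistent
 * buffer for a descriptor ring.  "example_alloc_ring" is made up for the
 * example.
 */
static int
example_alloc_ring (struct pci_dev *hwdev)
{
	dma_addr_t ring_dma;
	void *ring = swiotlb_alloc_consistent(hwdev, PAGE_SIZE, &ring_dma);

	if (!ring)
		return -ENOMEM;
	/* hand ring_dma to the device, access "ring" from the CPU ... */
	swiotlb_free_consistent(hwdev, PAGE_SIZE, ring, ring_dma);
	return 0;
}
#endif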

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The PCI address
 * to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until either
 * swiotlb_unmap_single or swiotlb_sync_single is performed.
 */
dma_addr_t
swiotlb_map_single (struct pci_dev *hwdev, void *ptr, size_t size, int direction)
{
	unsigned long pci_addr = virt_to_phys(ptr);

	if (direction == PCI_DMA_NONE)
		BUG();
	/*
	 * Check if the PCI device can DMA to ptr... if so, just return ptr
	 */
	if ((pci_addr & ~hwdev->dma_mask) == 0)
		/*
		 * The device is capable of DMA'ing to the buffer... just return the PCI
		 * address of ptr
		 */
		return pci_addr;

	/*
	 * get a bounce buffer:
	 */
	pci_addr = virt_to_phys(map_single(hwdev, ptr, size, direction));

	/*
	 * Ensure that the address returned is DMA'ble:
	 */
	if ((pci_addr & ~hwdev->dma_mask) != 0)
		panic("map_single: bounce buffer is not DMA'ble");

	return pci_addr;
}
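
#if 0
/*
 * Illustrative sketch only, not part of the original file and not built:
 * streaming a kmalloc'ed buffer to a hypothetical device.  The device owns
 * the buffer between the map and unmap calls; "example_stream_to_device"
 * is made up for the example.
 */
static void
example_stream_to_device (struct pci_dev *hwdev, void *buf, size_t len)
{
	dma_addr_t bus_addr = swiotlb_map_single(hwdev, buf, len, PCI_DMA_TODEVICE);

	/* program the device with bus_addr and wait for the transfer ... */
	swiotlb_unmap_single(hwdev, bus_addr, len, PCI_DMA_TODEVICE);
}
#endif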

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that update_mmu_cache() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
static void
mark_clean (void *addr, size_t size)
{
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page((void *)pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
}
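
/*
 * For illustration (not in the original source): a DMA buffer that starts
 * halfway into one page and ends halfway into another only gets the pages
 * it covers completely marked with PG_arch_1; the partially covered head
 * and tail pages are left alone and will still be flushed if needed.
 */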

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must match what
 * was provided in a previous swiotlb_map_single call.  All other usages are
 * undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see whatever the
 * device wrote there.
 */
void
swiotlb_unmap_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, int direction)
{
	char *dma_addr = phys_to_virt(pci_addr);

	if (direction == PCI_DMA_NONE)
		BUG();
	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
		unmap_single(hwdev, dma_addr, size, direction);
	else if (direction == PCI_DMA_FROMDEVICE)
		mark_clean(dma_addr, size);
}

/*
 * Make physical memory consistent for a single streaming mode DMA translation after a
 * transfer.
 *
 * If you perform a swiotlb_map_single() but wish to interrogate the buffer using the cpu,
 * yet do not wish to tear down the PCI dma mapping, you must call this function before
 * doing so.  The next time you give the PCI dma address back to the card, the device
 * again owns the buffer.
 */
void
swiotlb_sync_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, int direction)
{
	char *dma_addr = phys_to_virt(pci_addr);

	if (direction == PCI_DMA_NONE)
		BUG();
	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
		sync_single(hwdev, dma_addr, size, direction);
	else if (direction == PCI_DMA_FROMDEVICE)
		mark_clean(dma_addr, size);
}
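
#if 0
/*
 * Illustrative sketch only, not part of the original file and not built:
 * peeking at a long-lived receive buffer with the CPU without tearing the
 * mapping down.  "example_peek_rx" is made up for the example.
 */
static void
example_peek_rx (struct pci_dev *hwdev, void *buf, dma_addr_t bus_addr, size_t len)
{
	/* let the CPU see what the device has written so far */
	swiotlb_sync_single(hwdev, bus_addr, len, PCI_DMA_FROMDEVICE);
	/* examine buf ..., then hand bus_addr back to the device, which owns it again */
}
#endif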

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.  This is the
 * scatter-gather version of the above swiotlb_map_single interface.  Here the scatter-gather
 * list elements are each tagged with the appropriate dma address and length.  They are
 * obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_single are the same here.
 */
int
swiotlb_map_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
{
	int i;

	if (direction == PCI_DMA_NONE)
		BUG();

	for (i = 0; i < nelems; i++, sg++) {
		void *virt_address = SG_ENT_VIRT_ADDRESS(sg);
		unsigned long phys_address = virt_to_phys(virt_address);

		sg->dma_length = sg->length;
		if (phys_address & ~hwdev->dma_mask)
			sg->dma_address = virt_to_phys(map_single(hwdev, virt_address,
								  sg->length, direction));
		else
			sg->dma_address = phys_address;
	}
	return nelems;
}
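
#if 0
/*
 * Illustrative sketch only, not part of the original file and not built:
 * mapping a scatterlist that the caller has already filled in and walking
 * the dma address/length pairs that come back.  "example_map_sg" is made
 * up for the example.
 */
static void
example_map_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems)
{
	int i, used = swiotlb_map_sg(hwdev, sg, nelems, PCI_DMA_TODEVICE);

	for (i = 0; i < used; i++) {
		/* program entry i of the device with sg[i].dma_address and
		   sg[i].dma_length, then start the transfer ... */
	}
	swiotlb_unmap_sg(hwdev, sg, nelems, PCI_DMA_TODEVICE);
}
#endif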

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules concerning calls
 * here are the same as for swiotlb_unmap_single() above.
 */
void
swiotlb_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
{
	int i;

	if (direction == PCI_DMA_NONE)
		BUG();

	for (i = 0; i < nelems; i++, sg++)
		if (sg->dma_address != virt_to_phys(SG_ENT_VIRT_ADDRESS(sg))) {
			unmap_single(hwdev, phys_to_virt(sg->dma_address),
				     sg->dma_length, direction);
		} else if (direction == PCI_DMA_FROMDEVICE)
			mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->length);
}

/*
 * Make physical memory consistent for a set of streaming mode DMA translations after a
 * transfer.
 *
 * The same as swiotlb_sync_single but for a scatter-gather list, same rules and
 * usage.
 */
void
swiotlb_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
{
	int i;

	if (direction == PCI_DMA_NONE)
		BUG();

	for (i = 0; i < nelems; i++, sg++)
		if (sg->dma_address != virt_to_phys(SG_ENT_VIRT_ADDRESS(sg)))
			sync_single(hwdev, phys_to_virt(sg->dma_address),
				    sg->dma_length, direction);
}

/*
 * Return whether the given PCI device DMA address mask can be supported properly.  For
 * example, if your device can only drive the low 24-bits during PCI bus mastering, then
 * you would pass 0x00ffffff as the mask to this function.
 */
int
swiotlb_pci_dma_supported (struct pci_dev *hwdev, u64 mask)
{
	return 1;
}

EXPORT_SYMBOL(swiotlb_init);
EXPORT_SYMBOL(swiotlb_map_single);
EXPORT_SYMBOL(swiotlb_unmap_single);
EXPORT_SYMBOL(swiotlb_map_sg);
EXPORT_SYMBOL(swiotlb_unmap_sg);
EXPORT_SYMBOL(swiotlb_sync_single);
EXPORT_SYMBOL(swiotlb_sync_sg);
EXPORT_SYMBOL(swiotlb_alloc_consistent);
EXPORT_SYMBOL(swiotlb_free_consistent);
EXPORT_SYMBOL(swiotlb_pci_dma_supported);