1 /*
2 * linux/arch/alpha/kernel/pci_iommu.c
3 */
4
5 #include <linux/kernel.h>
6 #include <linux/mm.h>
7 #include <linux/pci.h>
8 #include <linux/gfp.h>
9 #include <linux/bootmem.h>
10 #include <linux/export.h>
11 #include <linux/scatterlist.h>
12 #include <linux/log2.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/iommu-helper.h>
15
16 #include <asm/io.h>
17 #include <asm/hwrpb.h>
18
19 #include "proto.h"
20 #include "pci_impl.h"
21
22
23 #define DEBUG_ALLOC 0
24 #if DEBUG_ALLOC > 0
25 # define DBGA(args...) printk(KERN_DEBUG args)
26 #else
27 # define DBGA(args...)
28 #endif
29 #if DEBUG_ALLOC > 1
30 # define DBGA2(args...) printk(KERN_DEBUG args)
31 #else
32 # define DBGA2(args...)
33 #endif
34
35 #define DEBUG_NODIRECT 0
36
37 #define ISA_DMA_MASK 0x00ffffff
38
39 static inline unsigned long
40 mk_iommu_pte(unsigned long paddr)
41 {
42 return (paddr >> (PAGE_SHIFT-1)) | 1;
43 }
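/*
 * A worked example of the PTE encoding above, assuming the usual 8 KB
 * Alpha page size (PAGE_SHIFT == 13): the window PTE holds the page
 * frame number shifted left by one with bit 0 set as the valid bit,
 * and (paddr >> (PAGE_SHIFT-1)) | 1 computes exactly that:
 *
 *	paddr = 0x12346000
 *	pfn   = paddr >> 13        = 0x91a3
 *	pte   = (pfn << 1) | 1     = 0x12347
 *	      = (paddr >> 12) | 1	same thing in one shift
 */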
44
45 /* Return the minimum of MAX and the first power of two no smaller
46 than main memory. */
47
48 unsigned long
49 size_for_memory(unsigned long max)
50 {
51 unsigned long mem = max_low_pfn << PAGE_SHIFT;
52 if (mem < max)
53 max = roundup_pow_of_two(mem);
54 return max;
55 }
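/*
 * Example: with 768 MB of low memory and max = 2 GB, mem (0x30000000)
 * is below max, so the function returns roundup_pow_of_two(0x30000000)
 * = 0x40000000 (1 GB).  With 4 GB of memory and the same max, max is
 * returned unchanged.
 */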
56
57 struct pci_iommu_arena * __init
58 iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
59 unsigned long window_size, unsigned long align)
60 {
61 unsigned long mem_size;
62 struct pci_iommu_arena *arena;
63
64 mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));
65
66 /* Note that the TLB lookup logic uses bitwise concatenation,
67 not addition, so the required arena alignment is based on
68 the size of the window. Retain the align parameter so that
69 particular systems can over-align the arena. */
70 if (align < mem_size)
71 align = mem_size;
72
73
74 #ifdef CONFIG_DISCONTIGMEM
75
76 arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
77 if (!NODE_DATA(nid) || !arena) {
78 printk("%s: couldn't allocate arena from node %d\n"
79 " falling back to system-wide allocation\n",
80 __func__, nid);
81 arena = alloc_bootmem(sizeof(*arena));
82 }
83
84 arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
85 if (!NODE_DATA(nid) || !arena->ptes) {
86 printk("%s: couldn't allocate arena ptes from node %d\n"
87 " falling back to system-wide allocation\n",
88 __func__, nid);
89 arena->ptes = __alloc_bootmem(mem_size, align, 0);
90 }
91
92 #else /* CONFIG_DISCONTIGMEM */
93
94 arena = alloc_bootmem(sizeof(*arena));
95 arena->ptes = __alloc_bootmem(mem_size, align, 0);
96
97 #endif /* CONFIG_DISCONTIGMEM */
98
99 spin_lock_init(&arena->lock);
100 arena->hose = hose;
101 arena->dma_base = base;
102 arena->size = window_size;
103 arena->next_entry = 0;
104
105 /* Align allocations to a multiple of a page size. Not needed
106 unless there are chip bugs. */
107 arena->align_entry = 1;
108
109 return arena;
110 }
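/*
 * Size arithmetic, as a concrete example: every PAGE_SIZE bytes of
 * window needs one sizeof(unsigned long) == 8 byte PTE, so with 8 KB
 * pages a 1 GB window needs
 *
 *	mem_size = 0x40000000 / (0x2000 / 8) = 0x100000	(1 MB of PTEs)
 *
 * and, because the hardware concatenates rather than adds when it
 * indexes the table, the PTE array must be aligned to at least that
 * 1 MB (hence the "if (align < mem_size) align = mem_size" above).
 */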
111
112 struct pci_iommu_arena * __init
113 iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
114 unsigned long window_size, unsigned long align)
115 {
116 return iommu_arena_new_node(0, hose, base, window_size, align);
117 }
118
119 /* Must be called with the arena lock held */
120 static long
121 iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
122 long n, long mask)
123 {
124 unsigned long *ptes;
125 long i, p, nent;
126 int pass = 0;
127 unsigned long base;
128 unsigned long boundary_size;
129
130 base = arena->dma_base >> PAGE_SHIFT;
131 if (dev) {
132 boundary_size = dma_get_seg_boundary(dev) + 1;
133 boundary_size >>= PAGE_SHIFT;
134 } else {
135 boundary_size = 1UL << (32 - PAGE_SHIFT);
136 }
137
138 /* Search forward for the first mask-aligned sequence of N free ptes */
139 ptes = arena->ptes;
140 nent = arena->size >> PAGE_SHIFT;
141 p = ALIGN(arena->next_entry, mask + 1);
142 i = 0;
143
144 again:
145 while (i < n && p+i < nent) {
146 if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
147 p = ALIGN(p + 1, mask + 1);
148 goto again;
149 }
150
151 if (ptes[p+i])
152 p = ALIGN(p + i + 1, mask + 1), i = 0;
153 else
154 i = i + 1;
155 }
156
157 if (i < n) {
158 if (pass < 1) {
159 /*
160 * Reached the end. Flush the TLB and restart
161 * the search from the beginning.
162 */
163 alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
164
165 pass++;
166 p = 0;
167 i = 0;
168 goto again;
169 } else
170 return -1;
171 }
172
173 /* Success. It's the responsibility of the caller to mark them
174 in use before releasing the lock */
175 return p;
176 }
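/*
 * Rough sketch of the boundary handling above: boundary_size is the
 * device's DMA segment boundary converted to PTE entries (defaulting
 * to 4 GB, i.e. 1UL << (32 - PAGE_SHIFT) entries, when no device is
 * supplied), and iommu_is_span_boundary() rejects any candidate run
 * [p, p+n) that would cross such a boundary.  Illustrative only: with
 * 8 KB pages and a 64 KB segment boundary, boundary_size is 8 entries,
 * so a 4-entry run may start at entry 4 but not at entry 6, which
 * would straddle the 8-entry mark; the search then bumps p to the next
 * mask-aligned slot and tries again.
 */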
177
178 static long
179 iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
180 unsigned int align)
181 {
182 unsigned long flags;
183 unsigned long *ptes;
184 long i, p, mask;
185
186 spin_lock_irqsave(&arena->lock, flags);
187
188 /* Search for N empty ptes */
189 ptes = arena->ptes;
190 mask = max(align, arena->align_entry) - 1;
191 p = iommu_arena_find_pages(dev, arena, n, mask);
192 if (p < 0) {
193 spin_unlock_irqrestore(&arena->lock, flags);
194 return -1;
195 }
196
197 /* Success. Mark them all in use, i.e. non-zero and invalid,
198 so that the iommu tlb cannot load them from under us.
199 The chip specific bits will fill this in with something
200 kosher when we return. */
201 for (i = 0; i < n; ++i)
202 ptes[p+i] = IOMMU_INVALID_PTE;
203
204 arena->next_entry = p + n;
205 spin_unlock_irqrestore(&arena->lock, flags);
206
207 return p;
208 }
209
210 static void
211 iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
212 {
213 unsigned long *p;
214 long i;
215
216 p = arena->ptes + ofs;
217 for (i = 0; i < n; ++i)
218 p[i] = 0;
219 }
220
221 /*
222 * True if the machine supports DAC addressing, and DEV can
223 * make use of it given MASK.
224 */
225 static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
226 {
227 dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
228 int ok = 1;
229
230 /* If this is not set, the machine doesn't support DAC at all. */
231 if (dac_offset == 0)
232 ok = 0;
233
234 /* The device has to be able to address our DAC bit. */
235 if ((dac_offset & dev->dma_mask) != dac_offset)
236 ok = 0;
237
238 /* If both conditions above are met, we are fine. */
239 DBGA("pci_dac_dma_supported %s from %p\n",
240 ok ? "yes" : "no", __builtin_return_address(0));
241
242 return ok;
243 }
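/*
 * DAC (dual address cycle) addressing, when the machine vector defines
 * pci_dac_offset, lets a 64-bit capable device bypass the SG window
 * entirely: the bus address is simply paddr + alpha_mv.pci_dac_offset
 * (see pci_map_single_1 below).  Illustrative check only, assuming a
 * dac_offset with a single high bit, say 0x8000000000:
 *
 *	64-bit mask: 0x8000000000 & mask == 0x8000000000	-> ok
 *	32-bit mask: 0x8000000000 & 0xffffffff == 0		-> not ok
 *
 * The real offset is chipset specific.
 */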
244
245 /* Map a single buffer of the indicated size for PCI DMA in streaming
246 mode. The 32-bit PCI bus mastering address to use is returned.
247 Once the device is given the dma address, the device owns this memory
248 until either pci_unmap_single or pci_dma_sync_single is performed. */
249
250 static dma_addr_t
251 pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
252 int dac_allowed)
253 {
254 struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
255 dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
256 struct pci_iommu_arena *arena;
257 long npages, dma_ofs, i;
258 unsigned long paddr;
259 dma_addr_t ret;
260 unsigned int align = 0;
261 struct device *dev = pdev ? &pdev->dev : NULL;
262
263 paddr = __pa(cpu_addr);
264
265 #if !DEBUG_NODIRECT
266 /* First check to see if we can use the direct map window. */
267 if (paddr + size + __direct_map_base - 1 <= max_dma
268 && paddr + size <= __direct_map_size) {
269 ret = paddr + __direct_map_base;
270
271 DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %p\n",
272 cpu_addr, size, ret, __builtin_return_address(0));
273
274 return ret;
275 }
276 #endif
277
278 /* Next, use DAC if selected earlier. */
279 if (dac_allowed) {
280 ret = paddr + alpha_mv.pci_dac_offset;
281
282 DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %p\n",
283 cpu_addr, size, ret, __builtin_return_address(0));
284
285 return ret;
286 }
287
288 /* If the machine doesn't define a pci_tbi routine, we have to
289 assume it doesn't support sg mapping, and, since we tried to
290 use direct_map above, it now must be considered an error. */
291 if (! alpha_mv.mv_pci_tbi) {
292 printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
293 return 0;
294 }
295
296 arena = hose->sg_pci;
297 if (!arena || arena->dma_base + arena->size - 1 > max_dma)
298 arena = hose->sg_isa;
299
300 npages = iommu_num_pages(paddr, size, PAGE_SIZE);
301
302 /* Force allocation to 64KB boundary for ISA bridges. */
303 if (pdev && pdev == isa_bridge)
304 align = 8;
305 dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
306 if (dma_ofs < 0) {
307 printk(KERN_WARNING "pci_map_single failed: "
308 "could not allocate dma page tables\n");
309 return 0;
310 }
311
312 paddr &= PAGE_MASK;
313 for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
314 arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);
315
316 ret = arena->dma_base + dma_ofs * PAGE_SIZE;
317 ret += (unsigned long)cpu_addr & ~PAGE_MASK;
318
319 DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %p\n",
320 cpu_addr, size, npages, ret, __builtin_return_address(0));
321
322 return ret;
323 }
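/*
 * Worked example of the SG-window case above, with 8 KB pages: a
 * buffer at paddr 0x3fffe800 of size 0x2000 starts 0x800 bytes into
 * its page, so iommu_num_pages() yields 2 PTEs; if iommu_arena_alloc()
 * hands back dma_ofs, the bus address returned to the driver is
 *
 *	arena->dma_base + dma_ofs * 0x2000 + 0x800
 *
 * and the two PTEs point at the two physical pages backing the
 * buffer.  (Illustrative numbers only.)
 */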
324
325 /* Helper for generic DMA-mapping functions. */
326 static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
327 {
328 if (dev && dev->bus == &pci_bus_type)
329 return to_pci_dev(dev);
330
331 /* Assume that non-PCI devices asking for DMA are either ISA or EISA,
332 BUG() otherwise. */
333 BUG_ON(!isa_bridge);
334
335 /* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
336 bridge is bus master then). */
337 if (!dev || !dev->dma_mask || !*dev->dma_mask)
338 return isa_bridge;
339
340 /* For EISA bus masters, return isa_bridge (it might have smaller
341 dma_mask due to wiring limitations). */
342 if (*dev->dma_mask >= isa_bridge->dma_mask)
343 return isa_bridge;
344
345 /* This assumes ISA bus master with dma_mask 0xffffff. */
346 return NULL;
347 }
348
349 static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
350 unsigned long offset, size_t size,
351 enum dma_data_direction dir,
352 struct dma_attrs *attrs)
353 {
354 struct pci_dev *pdev = alpha_gendev_to_pci(dev);
355 int dac_allowed;
356
357 if (dir == PCI_DMA_NONE)
358 BUG();
359
360 dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
361 return pci_map_single_1(pdev, (char *)page_address(page) + offset,
362 size, dac_allowed);
363 }
364
365 /* Unmap a single streaming mode DMA translation. The DMA_ADDR and
366 SIZE must match what was provided for in a previous pci_map_single
367 call. All other usages are undefined. After this call, reads by
368 the cpu to the buffer are guaranteed to see whatever the device
369 wrote there. */
370
371 static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
372 size_t size, enum dma_data_direction dir,
373 struct dma_attrs *attrs)
374 {
375 unsigned long flags;
376 struct pci_dev *pdev = alpha_gendev_to_pci(dev);
377 struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
378 struct pci_iommu_arena *arena;
379 long dma_ofs, npages;
380
381 if (dir == PCI_DMA_NONE)
382 BUG();
383
384 if (dma_addr >= __direct_map_base
385 && dma_addr < __direct_map_base + __direct_map_size) {
386 /* Nothing to do. */
387
388 DBGA2("pci_unmap_single: direct [%llx,%zx] from %p\n",
389 dma_addr, size, __builtin_return_address(0));
390
391 return;
392 }
393
394 if (dma_addr > 0xffffffff) {
395 DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %p\n",
396 dma_addr, size, __builtin_return_address(0));
397 return;
398 }
399
400 arena = hose->sg_pci;
401 if (!arena || dma_addr < arena->dma_base)
402 arena = hose->sg_isa;
403
404 dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
405 if (dma_ofs * PAGE_SIZE >= arena->size) {
406 printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
407 " base %llx size %x\n",
408 dma_addr, arena->dma_base, arena->size);
409 return;
410 BUG();
411 }
412
413 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
414
415 spin_lock_irqsave(&arena->lock, flags);
416
417 iommu_arena_free(arena, dma_ofs, npages);
418
419 /* If we're freeing ptes above the `next_entry' pointer (they
420 may have snuck back into the TLB since the last wrap flush),
421 we need to flush the TLB before reallocating the latter. */
422 if (dma_ofs >= arena->next_entry)
423 alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);
424
425 spin_unlock_irqrestore(&arena->lock, flags);
426
427 DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %p\n",
428 dma_addr, size, npages, __builtin_return_address(0));
429 }
430
431 /* Allocate and map kernel buffer using consistent mode DMA for PCI
432 device. Returns non-NULL cpu-view pointer to the buffer if
433 successful and sets *DMA_ADDRP to the pci side dma address as well,
434 else DMA_ADDRP is undefined. */
435
436 static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
437 dma_addr_t *dma_addrp, gfp_t gfp,
438 struct dma_attrs *attrs)
439 {
440 struct pci_dev *pdev = alpha_gendev_to_pci(dev);
441 void *cpu_addr;
442 long order = get_order(size);
443
444 gfp &= ~GFP_DMA;
445
446 try_again:
447 cpu_addr = (void *)__get_free_pages(gfp, order);
448 if (! cpu_addr) {
449 printk(KERN_INFO "pci_alloc_consistent: "
450 "get_free_pages failed from %p\n",
451 __builtin_return_address(0));
452 /* ??? Really atomic allocation? Otherwise we could play
453 with vmalloc and sg if we can't find contiguous memory. */
454 return NULL;
455 }
456 memset(cpu_addr, 0, size);
457
458 *dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
459 if (*dma_addrp == 0) {
460 free_pages((unsigned long)cpu_addr, order);
461 if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
462 return NULL;
463 /* The address doesn't fit the required mask and we
464 do not have an iommu. Try again with GFP_DMA. */
465 gfp |= GFP_DMA;
466 goto try_again;
467 }
468
469 DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %p\n",
470 size, cpu_addr, *dma_addrp, __builtin_return_address(0));
471
472 return cpu_addr;
473 }
474
475 /* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must
476 be values that were returned from pci_alloc_consistent. SIZE must
477 be the same as what was passed into pci_alloc_consistent.
478 References to the memory and mappings associated with CPU_ADDR or
479 DMA_ADDR past this call are illegal. */
480
481 static void alpha_pci_free_coherent(struct device *dev, size_t size,
482 void *cpu_addr, dma_addr_t dma_addr,
483 struct dma_attrs *attrs)
484 {
485 struct pci_dev *pdev = alpha_gendev_to_pci(dev);
486 pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
487 free_pages((unsigned long)cpu_addr, get_order(size));
488
489 DBGA2("pci_free_consistent: [%llx,%zx] from %p\n",
490 dma_addr, size, __builtin_return_address(0));
491 }
492
493 /* Classify the elements of the scatterlist. Write dma_address
494 of each element with:
495 0 : Followers all physically adjacent.
496 1 : Followers all virtually adjacent.
497 -1 : Not leader, physically adjacent to previous.
498 -2 : Not leader, virtually adjacent to previous.
499 Write dma_length of each leader with the combined lengths of
500 the mergable followers. */
501
502 #define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
503 #define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
504
505 static void
506 sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
507 int virt_ok)
508 {
509 unsigned long next_paddr;
510 struct scatterlist *leader;
511 long leader_flag, leader_length;
512 unsigned int max_seg_size;
513
514 leader = sg;
515 leader_flag = 0;
516 leader_length = leader->length;
517 next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;
518
519 /* we will not merge sg entries without a device. */
520 max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
521 for (++sg; sg < end; ++sg) {
522 unsigned long addr, len;
523 addr = SG_ENT_PHYS_ADDRESS(sg);
524 len = sg->length;
525
526 if (leader_length + len > max_seg_size)
527 goto new_segment;
528
529 if (next_paddr == addr) {
530 sg->dma_address = -1;
531 leader_length += len;
532 } else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
533 sg->dma_address = -2;
534 leader_flag = 1;
535 leader_length += len;
536 } else {
537 new_segment:
538 leader->dma_address = leader_flag;
539 leader->dma_length = leader_length;
540 leader = sg;
541 leader_flag = 0;
542 leader_length = len;
543 }
544
545 next_paddr = addr + len;
546 }
547
548 leader->dma_address = leader_flag;
549 leader->dma_length = leader_length;
550 }
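/*
 * Classification example (illustrative): suppose a four-entry list in
 * which entry 1 follows entry 0 physically, entry 2 starts on a new,
 * non-adjacent page but both ends are page aligned (so it can be made
 * virtually contiguous through the window), and entry 3 is simply
 * elsewhere.  sg_classify() then leaves
 *
 *	sg[0].dma_address = 1	(leader, has a virtual follower)
 *	sg[1].dma_address = -1	(physically adjacent to previous)
 *	sg[2].dma_address = -2	(virtually adjacent to previous)
 *	sg[3].dma_address = 0	(leader with no followers)
 *
 * with sg[0].dma_length covering entries 0-2 and sg[3].dma_length
 * just its own length.
 */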
551
552 /* Given a scatterlist leader, choose an allocation method and fill
553 in the blanks. */
554
555 static int
556 sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
557 struct scatterlist *out, struct pci_iommu_arena *arena,
558 dma_addr_t max_dma, int dac_allowed)
559 {
560 unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
561 long size = leader->dma_length;
562 struct scatterlist *sg;
563 unsigned long *ptes;
564 long npages, dma_ofs, i;
565
566 #if !DEBUG_NODIRECT
567 /* If everything is physically contiguous, and the addresses
568 fall into the direct-map window, use it. */
569 if (leader->dma_address == 0
570 && paddr + size + __direct_map_base - 1 <= max_dma
571 && paddr + size <= __direct_map_size) {
572 out->dma_address = paddr + __direct_map_base;
573 out->dma_length = size;
574
575 DBGA(" sg_fill: [%p,%lx] -> direct %llx\n",
576 __va(paddr), size, out->dma_address);
577
578 return 0;
579 }
580 #endif
581
582 /* If physically contiguous and DAC is available, use it. */
583 if (leader->dma_address == 0 && dac_allowed) {
584 out->dma_address = paddr + alpha_mv.pci_dac_offset;
585 out->dma_length = size;
586
587 DBGA(" sg_fill: [%p,%lx] -> DAC %llx\n",
588 __va(paddr), size, out->dma_address);
589
590 return 0;
591 }
592
593 /* Otherwise, we'll use the iommu to make the pages virtually
594 contiguous. */
595
596 paddr &= ~PAGE_MASK;
597 npages = iommu_num_pages(paddr, size, PAGE_SIZE);
598 dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
599 if (dma_ofs < 0) {
600 /* If we attempted a direct map above but failed, die. */
601 if (leader->dma_address == 0)
602 return -1;
603
604 /* Otherwise, break up the remaining virtually contiguous
605 hunks into individual direct maps and retry. */
606 sg_classify(dev, leader, end, 0);
607 return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
608 }
609
610 out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
611 out->dma_length = size;
612
613 DBGA(" sg_fill: [%p,%lx] -> sg %llx np %ld\n",
614 __va(paddr), size, out->dma_address, npages);
615
616 /* All virtually contiguous. We need to find the length of each
617 physically contiguous subsegment to fill in the ptes. */
618 ptes = &arena->ptes[dma_ofs];
619 sg = leader;
620 do {
621 #if DEBUG_ALLOC > 0
622 struct scatterlist *last_sg = sg;
623 #endif
624
625 size = sg->length;
626 paddr = SG_ENT_PHYS_ADDRESS(sg);
627
628 while (sg+1 < end && (int) sg[1].dma_address == -1) {
629 size += sg[1].length;
630 sg++;
631 }
632
633 npages = iommu_num_pages(paddr, size, PAGE_SIZE);
634
635 paddr &= PAGE_MASK;
636 for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
637 *ptes++ = mk_iommu_pte(paddr);
638
639 #if DEBUG_ALLOC > 0
640 DBGA(" (%ld) [%p,%x] np %ld\n",
641 last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
642 last_sg->length, npages);
643 while (++last_sg <= sg) {
644 DBGA(" (%ld) [%p,%x] cont\n",
645 last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
646 last_sg->length);
647 }
648 #endif
649 } while (++sg < end && (int) sg->dma_address < 0);
650
651 return 1;
652 }
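/*
 * Note on the PTE fill loop above: for a virtually-merged leader the
 * do/while walks the followers, lumping each run of physically
 * adjacent entries (dma_address == -1) into one mk_iommu_pte() burst,
 * so the resulting bus range is contiguous even though the underlying
 * pages are scattered.  E.g. a leader whose entries cover physical
 * pages P, P+1 and then Q ends up with three consecutive window PTEs
 * pointing at P, P+1, Q.  (Illustrative.)
 */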
653
654 static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
655 int nents, enum dma_data_direction dir,
656 struct dma_attrs *attrs)
657 {
658 struct pci_dev *pdev = alpha_gendev_to_pci(dev);
659 struct scatterlist *start, *end, *out;
660 struct pci_controller *hose;
661 struct pci_iommu_arena *arena;
662 dma_addr_t max_dma;
663 int dac_allowed;
664
665 if (dir == PCI_DMA_NONE)
666 BUG();
667
668 dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
669
670 /* Fast path single entry scatterlists. */
671 if (nents == 1) {
672 sg->dma_length = sg->length;
673 sg->dma_address
674 = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
675 sg->length, dac_allowed);
676 return sg->dma_address != 0;
677 }
678
679 start = sg;
680 end = sg + nents;
681
682 /* First, prepare information about the entries. */
683 sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);
684
685 /* Second, figure out where we're going to map things. */
686 if (alpha_mv.mv_pci_tbi) {
687 hose = pdev ? pdev->sysdata : pci_isa_hose;
688 max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
689 arena = hose->sg_pci;
690 if (!arena || arena->dma_base + arena->size - 1 > max_dma)
691 arena = hose->sg_isa;
692 } else {
693 max_dma = -1;
694 arena = NULL;
695 hose = NULL;
696 }
697
698 /* Third, iterate over the scatterlist leaders and allocate
699 dma space as needed. */
700 for (out = sg; sg < end; ++sg) {
701 if ((int) sg->dma_address < 0)
702 continue;
703 if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
704 goto error;
705 out++;
706 }
707
708 /* Mark the end of the list for pci_unmap_sg. */
709 if (out < end)
710 out->dma_length = 0;
711
712 if (out - start == 0)
713 printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
714 DBGA("pci_map_sg: %ld entries\n", out - start);
715
716 return out - start;
717
718 error:
719 printk(KERN_WARNING "pci_map_sg failed: "
720 "could not allocate dma page tables\n");
721
722 /* Some allocation failed while mapping the scatterlist
723 entries. Unmap them now. */
724 if (out > start)
725 pci_unmap_sg(pdev, start, out - start, dir);
726 return 0;
727 }
728
729 /* Unmap a set of streaming mode DMA translations. Again, cpu read
730 rules concerning calls here are the same as for pci_unmap_single()
731 above. */
732
733 static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
734 int nents, enum dma_data_direction dir,
735 struct dma_attrs *attrs)
736 {
737 struct pci_dev *pdev = alpha_gendev_to_pci(dev);
738 unsigned long flags;
739 struct pci_controller *hose;
740 struct pci_iommu_arena *arena;
741 struct scatterlist *end;
742 dma_addr_t max_dma;
743 dma_addr_t fbeg, fend;
744
745 if (dir == PCI_DMA_NONE)
746 BUG();
747
748 if (! alpha_mv.mv_pci_tbi)
749 return;
750
751 hose = pdev ? pdev->sysdata : pci_isa_hose;
752 max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
753 arena = hose->sg_pci;
754 if (!arena || arena->dma_base + arena->size - 1 > max_dma)
755 arena = hose->sg_isa;
756
757 fbeg = -1, fend = 0;
758
759 spin_lock_irqsave(&arena->lock, flags);
760
761 for (end = sg + nents; sg < end; ++sg) {
762 dma_addr_t addr;
763 size_t size;
764 long npages, ofs;
765 dma_addr_t tend;
766
767 addr = sg->dma_address;
768 size = sg->dma_length;
769 if (!size)
770 break;
771
772 if (addr > 0xffffffff) {
773 /* It's a DAC address -- nothing to do. */
774 DBGA(" (%ld) DAC [%llx,%zx]\n",
775 sg - end + nents, addr, size);
776 continue;
777 }
778
779 if (addr >= __direct_map_base
780 && addr < __direct_map_base + __direct_map_size) {
781 /* Nothing to do. */
782 DBGA(" (%ld) direct [%llx,%zx]\n",
783 sg - end + nents, addr, size);
784 continue;
785 }
786
787 DBGA(" (%ld) sg [%llx,%zx]\n",
788 sg - end + nents, addr, size);
789
790 npages = iommu_num_pages(addr, size, PAGE_SIZE);
791 ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
792 iommu_arena_free(arena, ofs, npages);
793
794 tend = addr + size - 1;
795 if (fbeg > addr) fbeg = addr;
796 if (fend < tend) fend = tend;
797 }
798
799 /* If we're freeing ptes above the `next_entry' pointer (they
800 may have snuck back into the TLB since the last wrap flush),
801 we need to flush the TLB before reallocating the latter. */
802 if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
803 alpha_mv.mv_pci_tbi(hose, fbeg, fend);
804
805 spin_unlock_irqrestore(&arena->lock, flags);
806
807 DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
808 }
809
810 /* Return whether the given PCI device DMA address mask can be
811 supported properly. */
812
813 static int alpha_pci_supported(struct device *dev, u64 mask)
814 {
815 struct pci_dev *pdev = alpha_gendev_to_pci(dev);
816 struct pci_controller *hose;
817 struct pci_iommu_arena *arena;
818
819 /* If there exists a direct map, and the mask fits either
820 the entire direct mapped space or the total system memory as
821 shifted by the map base, then the direct map will do. */
822 if (__direct_map_size != 0
823 && (__direct_map_base + __direct_map_size - 1 <= mask ||
824 __direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
825 return 1;
826
827 /* Check that we have a scatter-gather arena that fits. */
828 hose = pdev ? pdev->sysdata : pci_isa_hose;
829 arena = hose->sg_isa;
830 if (arena && arena->dma_base + arena->size - 1 <= mask)
831 return 1;
832 arena = hose->sg_pci;
833 if (arena && arena->dma_base + arena->size - 1 <= mask)
834 return 1;
835
836 /* As last resort try ZONE_DMA. */
837 if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
838 return 1;
839
840 return 0;
841 }
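/*
 * Example (illustrative values): with a 1 GB direct window based at
 * __direct_map_base = 0x40000000, a 32-bit mask covers
 * 0x40000000 + 0x40000000 - 1 = 0x7fffffff and the first test above
 * succeeds; a 24-bit ISA-style mask (0x00ffffff) fails both direct-map
 * tests and has to be satisfied by a scatter-gather arena instead
 * (hose->sg_isa, which platforms place low enough for ISA masters).
 */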
842
843
844 /*
845 * AGP GART extensions to the IOMMU
846 */
847 int
848 iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
849 {
850 unsigned long flags;
851 unsigned long *ptes;
852 long i, p;
853
854 if (!arena) return -EINVAL;
855
856 spin_lock_irqsave(&arena->lock, flags);
857
858 /* Search for N empty ptes. */
859 ptes = arena->ptes;
860 p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
861 if (p < 0) {
862 spin_unlock_irqrestore(&arena->lock, flags);
863 return -1;
864 }
865
866 /* Success. Mark them all reserved (i.e. non-zero and invalid)
867 so that the iommu tlb cannot load them from under us.
868 They will be filled in with valid bits by iommu_bind(). */
869 for (i = 0; i < pg_count; ++i)
870 ptes[p+i] = IOMMU_RESERVED_PTE;
871
872 arena->next_entry = p + pg_count;
873 spin_unlock_irqrestore(&arena->lock, flags);
874
875 return p;
876 }
877
878 int
879 iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
880 {
881 unsigned long *ptes;
882 long i;
883
884 if (!arena) return -EINVAL;
885
886 ptes = arena->ptes;
887
888 /* Make sure they're all reserved first... */
889 for(i = pg_start; i < pg_start + pg_count; i++)
890 if (ptes[i] != IOMMU_RESERVED_PTE)
891 return -EBUSY;
892
893 iommu_arena_free(arena, pg_start, pg_count);
894 return 0;
895 }
896
897 int
898 iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
899 struct page **pages)
900 {
901 unsigned long flags;
902 unsigned long *ptes;
903 long i, j;
904
905 if (!arena) return -EINVAL;
906
907 spin_lock_irqsave(&arena->lock, flags);
908
909 ptes = arena->ptes;
910
911 for(j = pg_start; j < pg_start + pg_count; j++) {
912 if (ptes[j] != IOMMU_RESERVED_PTE) {
913 spin_unlock_irqrestore(&arena->lock, flags);
914 return -EBUSY;
915 }
916 }
917
918 for(i = 0, j = pg_start; i < pg_count; i++, j++)
919 ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));
920
921 spin_unlock_irqrestore(&arena->lock, flags);
922
923 return 0;
924 }
925
926 int
927 iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
928 {
929 unsigned long *p;
930 long i;
931
932 if (!arena) return -EINVAL;
933
934 p = arena->ptes + pg_start;
935 for(i = 0; i < pg_count; i++)
936 p[i] = IOMMU_RESERVED_PTE;
937
938 return 0;
939 }
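/*
 * Typical call sequence from a chipset AGP backend (illustrative
 * sketch only; error handling omitted, "arena" and "pages" are the
 * caller's):
 *
 *	pg = iommu_reserve(arena, pg_count, align_mask);
 *	if (pg < 0)
 *		return -ENOMEM;
 *	...
 *	iommu_bind(arena, pg, pg_count, pages);	  map real pages
 *	...
 *	iommu_unbind(arena, pg, pg_count);	  back to RESERVED
 *	iommu_release(arena, pg, pg_count);	  free the PTEs
 */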
940
941 static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr)
942 {
943 return dma_addr == 0;
944 }
945
946 static int alpha_pci_set_mask(struct device *dev, u64 mask)
947 {
948 if (!dev->dma_mask ||
949 !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
950 return -EIO;
951
952 *dev->dma_mask = mask;
953 return 0;
954 }
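/*
 * This is what backs dma_set_mask() for PCI devices; a driver would
 * typically do something like (illustrative):
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 *
 * which ends up in alpha_pci_supported() above via pci_dma_supported().
 */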
955
956 struct dma_map_ops alpha_pci_ops = {
957 .alloc = alpha_pci_alloc_coherent,
958 .free = alpha_pci_free_coherent,
959 .map_page = alpha_pci_map_page,
960 .unmap_page = alpha_pci_unmap_page,
961 .map_sg = alpha_pci_map_sg,
962 .unmap_sg = alpha_pci_unmap_sg,
963 .mapping_error = alpha_pci_mapping_error,
964 .dma_supported = alpha_pci_supported,
965 .set_dma_mask = alpha_pci_set_mask,
966 };
967
968 struct dma_map_ops *dma_ops = &alpha_pci_ops;
969 EXPORT_SYMBOL(dma_ops);
970