/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * $Id: pci-gart.c,v 1.32 2004/02/27 18:30:19 ak Exp $
 */
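
/*
 * Illustrative driver-side usage of the streaming API implemented here
 * (a sketch only; Documentation/DMA-mapping.txt is the authoritative
 * reference):
 *
 *	dma_addr_t bus = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *	... hand 'bus' to the device and let it DMA ...
 *	pci_unmap_single(pdev, bus, len, PCI_DMA_TODEVICE);
 */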

#include <linux/config.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include "pci-x86_64.h"

unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

u32 *iommu_gatt_base;		/* Remapping table */

int no_iommu;
static int no_agp;
#ifdef CONFIG_IOMMU_DEBUG
int force_mmu = 1;
#else
int force_mmu = 0;
#endif
int iommu_fullflush = 1;

extern int fallback_aper_order;
extern int fallback_aper_force;

#ifdef CONFIG_SWIOTLB
extern char *io_tlb_start, *io_tlb_end;
#endif

/* Allocation bitmap for the remapping area */
static spinlock_t iommu_bitmap_lock = SPIN_LOCK_UNLOCKED;
static unsigned long *iommu_gart_bitmap;	/* guarded by iommu_bitmap_lock */

#define GPTE_VALID	1
#define GPTE_COHERENT	2
#define GPTE_ENCODE(x) (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
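
/*
 * Worked example of the PTE encoding above (derived from the macros, for
 * illustration only): a physical page at 0x123456000 is stored as
 * GPTE_ENCODE(0x123456000) == 0x23456013 -- address bits 12-31 stay in
 * PTE bits 12-31, address bits 32-39 go to PTE bits 4-11, plus the valid
 * and coherent bits.  GPTE_DECODE() reverses this.
 */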

#define for_all_nb(dev) \
	pci_for_each_dev(dev) \
		if (dev->vendor == PCI_VENDOR_ID_AMD && dev->device == 0x1103 && \
		    dev->bus->number == 0 && PCI_FUNC(dev->devfn) == 3 && \
		    (PCI_SLOT(dev->devfn) >= 24) && (PCI_SLOT(dev->devfn) <= 31))
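
/*
 * for_all_nb() iterates over PCI function 3 of every K8 northbridge
 * (AMD device id 0x1103, slots 24-31 on bus 0, one per node).  The GART
 * aperture, GATT base and flush registers used below all live in this
 * function's config space.
 */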
68
69 #define EMERGENCY_PAGES 32 /* = 128KB */
70
71 #ifdef CONFIG_AGP
72 extern int agp_init(void);
73 #define AGPEXTERN extern
74 #else
75 #define AGPEXTERN
76 #endif
77
78 /* backdoor interface to AGP driver */
79 AGPEXTERN int agp_memory_reserved;
80 AGPEXTERN __u32 *agp_gatt_table;
81
82 static unsigned long next_bit; /* protected by iommu_bitmap_lock */
83
84 static struct pci_dev *northbridges[NR_CPUS + 1];
85 static u32 northbridge_flush_word[NR_CPUS + 1];
86 static int need_flush; /* global flush state. set for each gart wrap */
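
/*
 * alloc_iommu() hands out aperture pages with a simple first-fit search in
 * the bitmap, starting at next_bit (the end of the previous allocation).
 * Whenever the search wraps back to the start of the aperture, need_flush
 * is set so the northbridge GART gets flushed before reused entries are
 * handed to a device.  With iommu_fullflush (the default) every allocation
 * forces a flush instead.
 */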
static unsigned long alloc_iommu(int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = find_next_zero_string(iommu_gart_bitmap, next_bit, iommu_pages, size);
	if (offset == -1) {
		need_flush = 1;
		offset = find_next_zero_string(iommu_gart_bitmap, 0, next_bit, size);
	}
	if (offset != -1) {
		set_bit_string(iommu_gart_bitmap, offset, size);
		next_bit = offset + size;
		if (next_bit >= iommu_pages) {
			need_flush = 1;
			next_bit = 0;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;
	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	clear_bit_string(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}


/*
 * Use global flush state to avoid races with multiple flushers.
 */
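/*
 * The flush itself writes the per-northbridge flush word (saved from config
 * register 0x9c at init time) back with bit 0 set and then polls until the
 * hardware clears that bit again, signalling completion.
 */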
static void __flush_gart(void)
{
	unsigned long flags;
	int flushed = 0;
	int i;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	/* recheck flush count inside lock */
	if (need_flush) {
		for (i = 0; northbridges[i]; i++) {
			u32 w;
			pci_write_config_dword(northbridges[i], 0x9c,
					       northbridge_flush_word[i] | 1);
			do {
				pci_read_config_dword(northbridges[i], 0x9c, &w);
			} while (w & 1);
			flushed++;
		}
		if (!flushed)
			printk("nothing to flush?\n");
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

static inline void flush_gart(void)
{
	if (need_flush)
		__flush_gart();
}

void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
			   dma_addr_t *dma_handle)
{
	void *memory;
	int gfp = GFP_ATOMIC;
	int i;
	unsigned long iommu_page;

	if (hwdev == NULL || hwdev->dma_mask < 0xffffffff || (no_iommu && !swiotlb))
		gfp |= GFP_DMA;

	/*
	 * First try to allocate a contiguous block and use it directly if it
	 * already lies in lowmem (below 4GB).
	 */
	size = round_up(size, PAGE_SIZE);
	memory = (void *)__get_free_pages(gfp, get_order(size));
	if (memory == NULL) {
		return NULL;
	} else {
		int high = 0, mmu;
		if (((unsigned long)virt_to_bus(memory) + size) > 0xffffffffUL)
			high = 1;
		mmu = high;
		if (force_mmu && !(gfp & GFP_DMA))
			mmu = 1;
		if (no_iommu) {
#ifdef CONFIG_SWIOTLB
			if (swiotlb && high && hwdev) {
				unsigned long dma_mask = 0;
				if (hwdev->dma_mask == ~0UL) {
					hwdev->dma_mask = 0xffffffff;
					dma_mask = ~0UL;
				}
				*dma_handle = swiotlb_map_single(hwdev, memory, size,
								 PCI_DMA_FROMDEVICE);
				if (dma_mask)
					hwdev->dma_mask = dma_mask;
				memset(phys_to_virt(*dma_handle), 0, size);
				free_pages((unsigned long)memory, get_order(size));
				return phys_to_virt(*dma_handle);
			}
#endif
			if (high) goto error;
			mmu = 0;
		}
		memset(memory, 0, size);
		if (!mmu) {
			*dma_handle = virt_to_bus(memory);
			return memory;
		}
	}

	size >>= PAGE_SHIFT;

	iommu_page = alloc_iommu(size);
	if (iommu_page == -1)
		goto error;

	/* Fill in the GATT, allocating pages as needed. */
	for (i = 0; i < size; i++) {
		unsigned long phys_mem;
		void *mem = memory + i*PAGE_SIZE;
		if (i > 0)
			atomic_inc(&virt_to_page(mem)->count);
		phys_mem = virt_to_phys(mem);
		BUG_ON(phys_mem & ~PHYSICAL_PAGE_MASK);
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
	}

	flush_gart();
	*dma_handle = iommu_bus_base + (iommu_page << PAGE_SHIFT);
	return memory;

 error:
	free_pages((unsigned long)memory, get_order(size));
	return NULL;
}


/*
 * Unmap consistent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void pci_free_consistent(struct pci_dev *hwdev, size_t size,
			 void *vaddr, dma_addr_t bus)
{
	unsigned long iommu_page;

	size = round_up(size, PAGE_SIZE);
#ifdef CONFIG_SWIOTLB
	/* Overlap should not happen */
	if (swiotlb && vaddr >= (void *)io_tlb_start &&
	    vaddr < (void *)io_tlb_end) {
		swiotlb_unmap_single(hwdev, bus, size, PCI_DMA_TODEVICE);
		return;
	}
#endif
	if (bus >= iommu_bus_base && bus < iommu_bus_base + iommu_size) {
		unsigned pages = size >> PAGE_SHIFT;
		int i;
		iommu_page = (bus - iommu_bus_base) >> PAGE_SHIFT;
		vaddr = __va(GPTE_DECODE(iommu_gatt_base[iommu_page]));
		for (i = 0; i < pages; i++) {
			u64 pte = iommu_gatt_base[iommu_page + i];
			BUG_ON((pte & GPTE_VALID) == 0);
			iommu_gatt_base[iommu_page + i] = 0;
		}
		free_iommu(iommu_page, pages);
	}
	free_pages((unsigned long)vaddr, get_order(size));
}

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
int iommu_leak_pages = 20;
extern unsigned long printk_address(unsigned long);
void dump_leak(void)
{
	int i;
	static int dump;
	if (dump || !iommu_leak_tab) return;
	dump = 1;
	show_stack(NULL);
	/* Very crude. dump some from the end of the table too */
	printk("Dumping %d pages from end of IOMMU:\n", iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk("%lu: ", iommu_pages-i);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i]);
		printk("%c", (i+1)%2 == 0 ? '\n' : ' ');
	}
	printk("\n");
}
#endif

static void iommu_full(struct pci_dev *dev, void *addr, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	printk(KERN_ERR
	       "PCI-DMA: Error: ran out of IOMMU space for %p size %lu at device %s[%s]\n",
	       addr, size, dev ? dev->name : "?", dev ? dev->slot_name : "?");

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory will be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory will be DMAed\n");
	}

#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int need_iommu(struct pci_dev *dev, unsigned long addr, size_t size)
{
	u64 mask = dev ? dev->dma_mask : 0xffffffff;
	int high = (~mask & (unsigned long)(addr + size)) != 0;
	int mmu = high;
	if (force_mmu)
		mmu = 1;
	if (no_iommu) {
		if (high)
			panic("pci_map_single: high address but no IOMMU.\n");
		mmu = 0;
	}
	return mmu;
}

dma_addr_t pci_map_single(struct pci_dev *dev, void *addr, size_t size,
			  int dir)
{
	unsigned long iommu_page;
	unsigned long phys_mem, bus;
	int i, npages;

	BUG_ON(dir == PCI_DMA_NONE);

#ifdef CONFIG_SWIOTLB
	if (swiotlb)
		return swiotlb_map_single(dev, addr, size, dir);
#endif

	phys_mem = virt_to_phys(addr);
	if (!need_iommu(dev, phys_mem, size))
		return phys_mem;

	npages = round_up(size + ((u64)addr & ~PAGE_MASK), PAGE_SIZE) >> PAGE_SHIFT;

	iommu_page = alloc_iommu(npages);
	if (iommu_page == -1) {
		iommu_full(dev, addr, size, dir);
		return iommu_bus_base;
	}

	phys_mem &= PAGE_MASK;
	for (i = 0; i < npages; i++, phys_mem += PAGE_SIZE) {
		BUG_ON(phys_mem & ~PHYSICAL_PAGE_MASK);

		/*
		 * Set coherent mapping here to avoid needing to flush
		 * the caches on mapping.
		 */
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);

#ifdef CONFIG_IOMMU_LEAK
		/* XXX need eventually caller of pci_map_sg */
		if (iommu_leak_tab)
			iommu_leak_tab[iommu_page + i] = __builtin_return_address(0);
#endif
	}
	flush_gart();

	bus = iommu_bus_base + iommu_page*PAGE_SIZE;
	return bus + ((unsigned long)addr & ~PAGE_MASK);
}

/*
 * Free a temporary PCI mapping.
 */
void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
		      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

#ifdef CONFIG_SWIOTLB
	if (swiotlb) {
		swiotlb_unmap_single(hwdev, dma_addr, size, direction);
		return;
	}
#endif

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;
	iommu_page = (dma_addr - iommu_bus_base) >> PAGE_SHIFT;
	npages = round_up(size + (dma_addr & ~PAGE_MASK), PAGE_SIZE) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = 0;
#ifdef CONFIG_IOMMU_LEAK
		if (iommu_leak_tab)
			iommu_leak_tab[iommu_page + i] = 0;
#endif
	}
	free_iommu(iommu_page, npages);
}

EXPORT_SYMBOL(pci_map_single);
EXPORT_SYMBOL(pci_unmap_single);

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;
	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024)
		printk(KERN_WARNING
		       "PCI-DMA: Warning: Small IOMMU %luMB. Consider increasing the AGP aperture in BIOS\n", iommu_size>>20);

	return iommu_size;
}

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32;
	u64 aper_base;
	unsigned aper_order;

	pci_read_config_dword(dev, 0x94, &aper_base_32);
	pci_read_config_dword(dev, 0x90, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size >= 0xffffffff || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}
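
/*
 * For reference (derived from read_aperture() above): the aperture order is
 * bits 3:1 of northbridge config register 0x90, giving a size of
 * 32MB << order (order 3 selects a 256MB aperture), and the aperture base
 * comes from bits 14:0 of register 0x94 shifted left by 25, i.e. in 32MB
 * units.  Apertures that would extend to 4GB or beyond are rejected here.
 */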
/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(agp_kern_info *info)
{
	struct pci_dev *dev;
	void *gatt;
	unsigned aper_base, new_aper_base;
	unsigned aper_size, gatt_size, new_aper_size;

	aper_size = aper_base = info->aper_size = 0;
	for_all_nb(dev) {
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size>>20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	memset(gatt, 0, gatt_size);
	change_page_attr(virt_to_page(gatt), gatt_size/PAGE_SIZE, PAGE_KERNEL_NOCACHE);
	agp_gatt_table = gatt;

	for_all_nb(dev) {
		u32 ctl;
		u32 gatt_reg;

		gatt_reg = __pa(gatt) >> 12;
		gatt_reg <<= 4;
		pci_write_config_dword(dev, 0x98, gatt_reg);
		pci_read_config_dword(dev, 0x90, &ctl);

		ctl |= 1;
		ctl &= ~((1<<4) | (1<<5));

		pci_write_config_dword(dev, 0x90, ctl);
	}
	flush_gart();

	printk("PCI-DMA: aperture base @ %x size %u KB\n", aper_base, aper_size>>10);
	return 0;

 nommu:
	/* XXX: reject 0xffffffff mask now in pci mapping functions */
	if (end_pfn >= 0xffffffff>>PAGE_SHIFT)
		printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
		       KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.");
	return -1;
}

void __init pci_iommu_init(void)
{
	struct pci_dev *dev;
	agp_kern_info info;
	unsigned long aper_size;
	unsigned long iommu_start;

#ifndef CONFIG_AGP
	no_agp = 1;
#else
	no_agp = no_agp || (agp_init() < 0) || (agp_copy_info(&info) < 0);
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb) {
		no_iommu = 1;
		printk(KERN_INFO "PCI-DMA: Using SWIOTLB\n");
		return;
	}
#endif

	if (no_iommu || (!force_mmu && end_pfn < 0xffffffff>>PAGE_SHIFT) || !iommu_aperture) {
		printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
		no_iommu = 1;
		return;
	}

	if (no_agp) {
		int err = -1;
		printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
		no_agp = 1;
		if (force_mmu || end_pfn >= 0xffffffff>>PAGE_SHIFT)
			err = init_k8_gatt(&info);
		if (err < 0) {
			printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
			no_iommu = 1;
			return;
		}
	}

	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *)__get_free_pages(GFP_KERNEL,
						     get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
					  get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * 8);
		else
			printk("PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size>>20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);
	bad_dma_address = iommu_bus_base;

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is always mapped
	 * with cache enabled and there is no full cache coherency across the GART
	 * remapping. The unmapping avoids automatic prefetches from the CPU
	 * allocating cache lines in there. All CPU accesses are done via the
	 * direct mapping to the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);

	for_all_nb(dev) {
		u32 flag;
		int cpu = PCI_SLOT(dev->devfn) - 24;
		if (cpu >= NR_CPUS)
			continue;
		northbridges[cpu] = dev;

		pci_read_config_dword(dev, 0x9c, &flag); /* cache flush word */
		northbridge_flush_word[cpu] = flag;
	}

	asm volatile("wbinvd" ::: "memory");

	flush_gart();
}

/*
 * iommu=[size][,noagp][,off][,force][,noforce][,leak][,memaper[=order]][,nofullflush]
 *	size		set size of the IOMMU area (in bytes)
 *	noagp		don't initialize the AGP driver and use the full aperture.
 *	off		don't use the IOMMU
 *	leak		turn on simple iommu leak tracing (only when CONFIG_IOMMU_LEAK is on)
 *	memaper[=order]	allocate a private aperture over RAM with size 32MB << order.
 *	noforce		don't force IOMMU usage. Default.
 *	force		force IOMMU usage for all devices.
 *	nofullflush	use optimized IOMMU flushing (may break on some devices).
 *			Full flush is the default.
 */
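
/*
 * Example command lines (illustrative only, matching the parser below):
 *	iommu=force			always remap DMA through the GART
 *	iommu=noforce,nofullflush	remap only when needed, flush only on wrap
 *	iommu=memaper=2			set up a private 128MB aperture over RAM
 *	iommu=67108864			limit the IOMMU area to 64MB
 */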
__init int iommu_setup(char *opt)
{
	int arg;
	char *p = opt;

	for (;;) {
		if (!memcmp(p, "noagp", 5))
			no_agp = 1;
		if (!memcmp(p, "off", 3))
			no_iommu = 1;
		if (!memcmp(p, "force", 5))
			force_mmu = 1;
		if (!memcmp(p, "noforce", 7))
			force_mmu = 0;
		if (!memcmp(p, "nofullflush", 11))
			iommu_fullflush = 0;
		if (!memcmp(p, "memaper", 7)) {
			fallback_aper_force = 1;
			p += 7;
			if (*p == '=') {
				++p;
				if (get_option(&p, &arg))
					fallback_aper_order = arg;
			}
		}
#ifdef CONFIG_IOMMU_LEAK
		if (!memcmp(p, "leak", 4)) {
			leak_trace = 1;
			p += 4;
			if (*p == '=') ++p;
			if (isdigit(*p) && get_option(&p, &arg))
				iommu_leak_pages = arg;
		} else
#endif
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_size = arg;
		do {
			if (*p == ' ' || *p == 0)
				return 0;
		} while (*p++ != ',');
	}
	return 1;
}