// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2001-2003 SuSE Labs.
 * Distributed under the GNU public license, v2.
 *
 * This is a GART driver for the AMD Opteron/Athlon64 on-CPU northbridge.
 * It also includes support for the AMD 8151 AGP bridge,
 * although it doesn't actually do much, as all the real
 * work is done in the northbridge(s).
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <linux/mmzone.h>
#include <asm/page.h>		/* PAGE_SIZE */
#include <asm/e820/api.h>
#include <asm/amd_nb.h>
#include <asm/gart.h>
#include "agp.h"

/* NVIDIA K8 registers */
#define NVIDIA_X86_64_0_APBASE		0x10
#define NVIDIA_X86_64_1_APBASE1		0x50
#define NVIDIA_X86_64_1_APLIMIT1	0x54
#define NVIDIA_X86_64_1_APSIZE		0xa8
#define NVIDIA_X86_64_1_APBASE2		0xd8
#define NVIDIA_X86_64_1_APLIMIT2	0xdc

/* ULi K8 registers */
#define ULI_X86_64_BASE_ADDR		0x10
#define ULI_X86_64_HTT_FEA_REG		0x50
#define ULI_X86_64_ENU_SCR_REG		0x54

static struct resource *aperture_resource;
static bool __initdata agp_try_unsupported = 1;
static int agp_bridges_found;
static void amd64_tlbflush(struct agp_memory *temp)
{
	amd_flush_garts();
}

static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int i, j, num_entries;
	long long tmp;
	int mask_type;
	struct agp_bridge_data *bridge = mem->bridge;
	u32 pte;

	num_entries = agp_num_entries();

	if (type != mem->type)
		return -EINVAL;
	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0)
		return -EINVAL;


	/* Make sure we can fit the range in the gatt table. */
	/* FIXME: could wrap */
	if (((unsigned long)pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	j = pg_start;

	/* gatt table should be empty. */
	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j)))
			return -EBUSY;
		j++;
	}

	if (!mem->is_flushed) {
		global_cache_flush();
		mem->is_flushed = true;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		tmp = agp_bridge->driver->mask_memory(agp_bridge,
						      page_to_phys(mem->pages[i]),
						      mask_type);

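		/*
		 * GART PTE layout: bits [31:12] carry physical address bits
		 * [31:12], bits [11:4] carry physical address bits [39:32],
		 * and bits 1/0 are the coherent/valid flags -- hence the
		 * mask check and the shift of the upper bits down by 28.
		 */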
		BUG_ON(tmp & 0xffffff0000000ffcULL);
		pte = (tmp & 0x000000ff00000000ULL) >> 28;
		pte |= (tmp & 0x00000000fffff000ULL);
		pte |= GPTE_VALID | GPTE_COHERENT;

		writel(pte, agp_bridge->gatt_table+j);
		readl(agp_bridge->gatt_table+j);	/* PCI Posting. */
	}
	amd64_tlbflush(mem);
	return 0;
}

/*
 * This hack alters the order element according
 * to the size of a long. It sucks. I totally disown this, even
 * though it does appear to work for the most part.
 */
static struct aper_size_info_32 amd64_aperture_sizes[7] =
{
	{32,   8192,   3+(sizeof(long)/8), 0 },
	{64,   16384,  4+(sizeof(long)/8), 1<<1 },
	{128,  32768,  5+(sizeof(long)/8), 1<<2 },
	{256,  65536,  6+(sizeof(long)/8), 1<<1 | 1<<2 },
	{512,  131072, 7+(sizeof(long)/8), 1<<3 },
	{1024, 262144, 8+(sizeof(long)/8), 1<<1 | 1<<3},
	{2048, 524288, 9+(sizeof(long)/8), 1<<2 | 1<<3}
};


/*
 * Get the current aperture size from the x86-64 northbridge.
 * Note that there may be multiple x86-64 northbridges, but we just
 * return the value from the first one we find. The set_size functions
 * keep the rest coherent anyway, or at least should do.
 */
static int amd64_fetch_size(void)
{
	struct pci_dev *dev;
	int i;
	u32 temp;
	struct aper_size_info_32 *values;

	dev = node_to_amd_nb(0)->misc;
	if (dev == NULL)
		return 0;

	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &temp);
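	/* The GART aperture size field sits in bits [3:1], hence the 0xe mask. */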
	temp = (temp & 0xe);
	values = A_SIZE_32(amd64_aperture_sizes);

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp == values[i].size_value) {
			agp_bridge->previous_size =
			    agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}
	return 0;
}

/*
 * In a multiprocessor x86-64 system, this function gets
 * called once for each northbridge (i.e. once per CPU node).
 */
static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table)
{
	u64 aperturebase;
	u32 tmp;
	u64 aper_base;

	/* Address to map to */
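	/*
	 * The base register holds physical address bits [39:25], i.e. the
	 * aperture base in 32MB units, hence the shift left by 25.
	 */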
	pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp);
	aperturebase = (u64)tmp << 25;
	aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK);

	enable_gart_translation(hammer, gatt_table);

	return aper_base;
}


static const struct aper_size_info_32 amd_8151_sizes[7] =
{
	{2048, 524288, 9, 0x00000000 },	/* 0 0 0 0 0 0 */
	{1024, 262144, 8, 0x00000400 },	/* 1 0 0 0 0 0 */
	{512,  131072, 7, 0x00000600 },	/* 1 1 0 0 0 0 */
	{256,  65536,  6, 0x00000700 },	/* 1 1 1 0 0 0 */
	{128,  32768,  5, 0x00000720 },	/* 1 1 1 1 0 0 */
	{64,   16384,  4, 0x00000730 },	/* 1 1 1 1 1 0 */
	{32,   8192,   3, 0x00000738 }	/* 1 1 1 1 1 1 */
};

static int amd_8151_configure(void)
{
	unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	/* Configure AGP regs in each x86-64 host bridge. */
	for (i = 0; i < amd_nb_num(); i++) {
		agp_bridge->gart_bus_addr =
			amd64_configure(node_to_amd_nb(i)->misc, gatt_bus);
	}
	amd_flush_garts();
	return 0;
}


static void amd64_cleanup(void)
{
	u32 tmp;
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;
		/* disable gart translation */
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp);
		tmp &= ~GARTEN;
		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, tmp);
	}
}


static const struct agp_bridge_driver amd_8151_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= amd_8151_sizes,
	.size_type		= U32_APER_SIZE,
	.num_aperture_sizes	= 7,
	.needs_scratch_page	= true,
	.configure		= amd_8151_configure,
	.fetch_size		= amd64_fetch_size,
	.cleanup		= amd64_cleanup,
	.tlb_flush		= amd64_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= NULL,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= amd64_insert_memory,
	.remove_memory		= agp_generic_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
};

/* Some basic sanity checks for the aperture. */
static int agp_aperture_valid(u64 aper, u32 size)
{
	if (!aperture_valid(aper, size, 32*1024*1024))
		return 0;

	/* Request the aperture. This catches cases where someone else
	   has already put a mapping in there - happens with some very
	   broken BIOSes.

	   Maybe it would be better to use pci_assign_resource/pci_enable_device
	   instead of trusting the bridges? */
	if (!aperture_resource &&
	    !(aperture_resource = request_mem_region(aper, size, "aperture"))) {
		printk(KERN_ERR PFX "Aperture conflicts with PCI mapping.\n");
		return 0;
	}
	return 1;
}

/*
 * W*s-centric BIOSes sometimes set up the aperture only in the AGP
 * bridge, not in the northbridge. On AMD64 this is handled early
 * in aperture.c, but when the IOMMU is not enabled or we run on a
 * 32-bit kernel this needs to be redone.
 * Unfortunately it is impossible to fix the aperture here because it's
 * too late to allocate that much memory. But at least error out cleanly
 * instead of crashing.
 */
static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap)
{
	u64 aper, nb_aper;
	int order = 0;
	u32 nb_order, nb_base;
	u16 apsize;

	pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order);
	nb_order = (nb_order >> 1) & 7;
	pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base);
	nb_aper = (u64)nb_base << 25;

	/* Northbridge seems to contain crap. Try the AGP bridge. */

	pci_read_config_word(agp, cap+0x14, &apsize);
	if (apsize == 0xffff) {
		if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
			return 0;
		return -1;
	}

	apsize &= 0xfff;
	/* Some BIOSes use weird encodings not in the AGPv3 table. */
	if (apsize & 0xff)
		apsize |= 0xf00;
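	/*
	 * In the AGPv3 APSIZE encoding each set bit halves the 4GB maximum,
	 * so the aperture is (32MB << order) with order = 7 - popcount.
	 */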
	order = 7 - hweight16(apsize);

	aper = pci_bus_address(agp, AGP_APERTURE_BAR);

	/*
	 * On some sick chips APSIZE is 0. This means it wants 4G,
	 * so let's double-check the order and trust the AMD NB settings instead.
	 */
	if (order >= 0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) {
		dev_info(&agp->dev, "aperture size %u MB is not right, using settings from NB\n",
			 32 << order);
		order = nb_order;
	}

	if (nb_order >= order) {
		if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
			return 0;
	}

	dev_info(&agp->dev, "aperture from AGP @ %Lx size %u MB\n",
		 aper, 32 << order);
	if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order))
		return -1;

	gart_set_size_and_enable(nb, order);
	pci_write_config_dword(nb, AMD64_GARTAPERTUREBASE, aper >> 25);

	return 0;
}

static int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
{
	int i;

	if (!amd_nb_num())
		return -ENODEV;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return -ENODEV;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;
		if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
			dev_err(&dev->dev, "no usable aperture found\n");
#ifdef __x86_64__
			/* should port this to i386 */
			dev_err(&dev->dev, "consider rebooting with iommu=memaper=2 to get a good aperture\n");
#endif
			return -1;
		}
	}
	return 0;
}

/* Handle AMD 8151 quirks */
static void amd8151_init(struct pci_dev *pdev, struct agp_bridge_data *bridge)
{
	char *revstring;

	switch (pdev->revision) {
	case 0x01: revstring = "A0"; break;
	case 0x02: revstring = "A1"; break;
	case 0x11: revstring = "B0"; break;
	case 0x12: revstring = "B1"; break;
	case 0x13: revstring = "B2"; break;
	case 0x14: revstring = "B3"; break;
	default:   revstring = "??"; break;
	}

	dev_info(&pdev->dev, "AMD 8151 AGP Bridge rev %s\n", revstring);

	/*
	 * Work around an erratum:
	 * chips before the B2 stepping incorrectly report AGP v3.5.
	 */
	if (pdev->revision < 0x13) {
		dev_info(&pdev->dev, "correcting AGP revision (reports 3.5, is really 3.0)\n");
		bridge->major_version = 3;
		bridge->minor_version = 0;
	}
}


static const struct aper_size_info_32 uli_sizes[7] =
{
	{256, 65536, 6, 10},
	{128, 32768, 5, 9},
	{64, 16384, 4, 8},
	{32, 8192, 3, 7},
	{16, 4096, 2, 6},
	{8, 2048, 1, 4},
	{4, 1024, 0, 3}
};

static int uli_agp_init(struct pci_dev *pdev)
{
	u32 httfea, baseaddr, enuscr;
	struct pci_dev *dev1;
	int i, ret;
	unsigned size = amd64_fetch_size();

	dev_info(&pdev->dev, "setting up ULi AGP\n");
	dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(0, 0));
	if (dev1 == NULL) {
		dev_info(&pdev->dev, "can't find ULi secondary device\n");
		return -ENODEV;
	}

	for (i = 0; i < ARRAY_SIZE(uli_sizes); i++)
		if (uli_sizes[i].size == size)
			break;

	if (i == ARRAY_SIZE(uli_sizes)) {
		dev_info(&pdev->dev, "no ULi size found for %d\n", size);
		ret = -ENODEV;
		goto put;
	}

	/* shadow x86-64 registers into ULi registers */
	pci_read_config_dword(node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE,
			      &httfea);

	/* if x86-64 aperture base is beyond 4G, exit here */
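	/*
	 * Bits [14:7] of the base register are address bits [39:32]; any of
	 * them set means the aperture sits above 4GB, which the 32-bit ULi
	 * shadow registers cannot describe.
	 */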
	if ((httfea & 0x7fff) >> (32 - 25)) {
		ret = -ENODEV;
		goto put;
	}

	httfea = (httfea & 0x7fff) << 25;

	pci_read_config_dword(pdev, ULI_X86_64_BASE_ADDR, &baseaddr);
	baseaddr &= ~PCI_BASE_ADDRESS_MEM_MASK;
	baseaddr |= httfea;
	pci_write_config_dword(pdev, ULI_X86_64_BASE_ADDR, baseaddr);

	enuscr = httfea + (size * 1024 * 1024) - 1;
	pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea);
	pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr);
	ret = 0;
put:
	pci_dev_put(dev1);
	return ret;
}


static const struct aper_size_info_32 nforce3_sizes[5] =
{
	{512,  131072, 7, 0x00000000 },
	{256,  65536,  6, 0x00000008 },
	{128,  32768,  5, 0x0000000C },
	{64,   16384,  4, 0x0000000E },
	{32,   8192,   3, 0x0000000F }
};

/* Handle shadow device of the Nvidia NForce3 */
/* CHECK-ME original 2.4 version set up some IORRs. Check if that is needed. */
static int nforce3_agp_init(struct pci_dev *pdev)
{
	u32 tmp, apbase, apbar, aplimit;
	struct pci_dev *dev1;
	int i, ret;
	unsigned size = amd64_fetch_size();

	dev_info(&pdev->dev, "setting up Nforce3 AGP\n");

	dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(11, 0));
	if (dev1 == NULL) {
		dev_info(&pdev->dev, "can't find Nforce3 secondary device\n");
		return -ENODEV;
	}

	for (i = 0; i < ARRAY_SIZE(nforce3_sizes); i++)
		if (nforce3_sizes[i].size == size)
			break;

	if (i == ARRAY_SIZE(nforce3_sizes)) {
		dev_info(&pdev->dev, "no NForce3 size found for %d\n", size);
		ret = -ENODEV;
		goto put;
	}

	pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp);
	tmp &= ~(0xf);
	tmp |= nforce3_sizes[i].size_value;
	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp);

	/* shadow x86-64 registers into NVIDIA registers */
	pci_read_config_dword(node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE,
			      &apbase);

	/* if x86-64 aperture base is beyond 4G, exit here */
	if ((apbase & 0x7fff) >> (32 - 25)) {
		dev_info(&pdev->dev, "aperture base > 4G\n");
		ret = -ENODEV;
		goto put;
	}

	apbase = (apbase & 0x7fff) << 25;

	pci_read_config_dword(pdev, NVIDIA_X86_64_0_APBASE, &apbar);
	apbar &= ~PCI_BASE_ADDRESS_MEM_MASK;
	apbar |= apbase;
	pci_write_config_dword(pdev, NVIDIA_X86_64_0_APBASE, apbar);

	aplimit = apbase + (size * 1024 * 1024) - 1;
	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE1, apbase);
	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT1, aplimit);
	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase);
	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit);

	ret = 0;
put:
	pci_dev_put(dev1);

	return ret;
}

static int agp_amd64_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	u8 cap_ptr;
	int err;

	/* The Highlander principle: there can be only one AGP bridge. */
	if (agp_bridges_found)
		return -ENODEV;

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
	if (!cap_ptr)
		return -ENODEV;

	/* Could check for AGPv3 here */

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	if (pdev->vendor == PCI_VENDOR_ID_AMD &&
	    pdev->device == PCI_DEVICE_ID_AMD_8151_0) {
		amd8151_init(pdev, bridge);
	} else {
		dev_info(&pdev->dev, "AGP bridge [%04x/%04x]\n",
			 pdev->vendor, pdev->device);
	}

	bridge->driver = &amd_8151_driver;
	bridge->dev = pdev;
	bridge->capndx = cap_ptr;

	/* Fill in the mode register */
	pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode);

	if (cache_nbs(pdev, cap_ptr) < 0) {
		agp_put_bridge(bridge);
		return -ENODEV;
	}

	if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) {
		int ret = nforce3_agp_init(pdev);
		if (ret) {
			agp_put_bridge(bridge);
			return ret;
		}
	}

	if (pdev->vendor == PCI_VENDOR_ID_AL) {
		int ret = uli_agp_init(pdev);
		if (ret) {
			agp_put_bridge(bridge);
			return ret;
		}
	}

	pci_set_drvdata(pdev, bridge);
	err = agp_add_bridge(bridge);
	if (err < 0)
		return err;

	agp_bridges_found++;
	return 0;
}

static void agp_amd64_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	release_mem_region(virt_to_phys(bridge->gatt_table_real),
			   amd64_aperture_sizes[bridge->aperture_size_idx].size);
	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);

	agp_bridges_found--;
}

#define agp_amd64_suspend NULL

static int __maybe_unused agp_amd64_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pdev->vendor == PCI_VENDOR_ID_NVIDIA)
		nforce3_agp_init(pdev);

	return amd_8151_configure();
}

static const struct pci_device_id agp_amd64_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_AMD,
	.device		= PCI_DEVICE_ID_AMD_8151_0,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* ULi M1689 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_AL,
	.device		= PCI_DEVICE_ID_AL_M1689,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* VIA K8T800Pro */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_VIA,
	.device		= PCI_DEVICE_ID_VIA_K8T800PRO_0,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* VIA K8T800 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_VIA,
	.device		= PCI_DEVICE_ID_VIA_8385_0,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* VIA K8M800 / K8N800 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_VIA,
	.device		= PCI_DEVICE_ID_VIA_8380_0,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* VIA K8M890 / K8N890 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_VIA,
	.device		= PCI_DEVICE_ID_VIA_VT3336,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* VIA K8T890 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_VIA,
	.device		= PCI_DEVICE_ID_VIA_3238_0,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* VIA K8T800/K8M800/K8N800 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_VIA,
	.device		= PCI_DEVICE_ID_VIA_838X_1,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* NForce3 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_NVIDIA,
	.device		= PCI_DEVICE_ID_NVIDIA_NFORCE3,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_NVIDIA,
	.device		= PCI_DEVICE_ID_NVIDIA_NFORCE3S,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* SIS 755 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_SI,
	.device		= PCI_DEVICE_ID_SI_755,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* SIS 760 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_SI,
	.device		= PCI_DEVICE_ID_SI_760,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* ALI/ULI M1695 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_AL,
	.device		= 0x1695,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},

	{ }
};

MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);

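/*
 * Catch-all ID table (class 0 with a zero mask matches any device); used
 * below by agp_amd64_init() to re-probe for otherwise unsupported AGP
 * bridges when agp_try_unsupported is set.
 */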
static const struct pci_device_id agp_amd64_pci_promisc_table[] = {
	{ PCI_DEVICE_CLASS(0, 0) },
	{ }
};

static SIMPLE_DEV_PM_OPS(agp_amd64_pm_ops, agp_amd64_suspend, agp_amd64_resume);

static struct pci_driver agp_amd64_pci_driver = {
	.name		= "agpgart-amd64",
	.id_table	= agp_amd64_pci_table,
	.probe		= agp_amd64_probe,
	.remove		= agp_amd64_remove,
	.driver.pm	= &agp_amd64_pm_ops,
};


/* Not static due to IOMMU code calling it early. */
int __init agp_amd64_init(void)
{
	int err = 0;

	if (agp_off)
		return -EINVAL;

	err = pci_register_driver(&agp_amd64_pci_driver);
	if (err < 0)
		return err;

	if (agp_bridges_found == 0) {
		if (!agp_try_unsupported && !agp_try_unsupported_boot) {
			printk(KERN_INFO PFX "No supported AGP bridge found.\n");
#ifdef MODULE
			printk(KERN_INFO PFX "You can try agp_try_unsupported=1\n");
#else
			printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
#endif
			pci_unregister_driver(&agp_amd64_pci_driver);
			return -ENODEV;
		}

		/* First check that we have at least one AMD64 NB */
		if (!amd_nb_num()) {
			pci_unregister_driver(&agp_amd64_pci_driver);
			return -ENODEV;
		}

		/* Look for any AGP bridge */
		agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
		err = driver_attach(&agp_amd64_pci_driver.driver);
		if (err == 0 && agp_bridges_found == 0) {
			pci_unregister_driver(&agp_amd64_pci_driver);
			err = -ENODEV;
		}
	}
	return err;
}

static int __init agp_amd64_mod_init(void)
{
#ifndef MODULE
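	/*
	 * When built in, the GART IOMMU code is expected to have called
	 * agp_amd64_init() early (see the comment above that function),
	 * so just report whether a bridge was found.
	 */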
	if (gart_iommu_aperture)
		return agp_bridges_found ? 0 : -ENODEV;
#endif
	return agp_amd64_init();
}

static void __exit agp_amd64_cleanup(void)
{
#ifndef MODULE
	if (gart_iommu_aperture)
		return;
#endif
	if (aperture_resource)
		release_resource(aperture_resource);
	pci_unregister_driver(&agp_amd64_pci_driver);
}

module_init(agp_amd64_mod_init);
module_exit(agp_amd64_cleanup);

MODULE_AUTHOR("Dave Jones, Andi Kleen");
module_param(agp_try_unsupported, bool, 0);
MODULE_LICENSE("GPL");