/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

static u32 *flush_words;

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{}
};

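/*
 * Bus/device ranges scanned early for northbridge devices;
 * each entry is { bus, dev_base, dev_limit }.
 */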
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

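/*
 * Walk the global PCI device list starting after @dev and return the next
 * device matching one of @ids, or NULL when the list is exhausted.
 */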
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

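/*
 * Enumerate the northbridge PCI devices once and cache them in
 * amd_northbridges: the misc (function 3) and, where present, link
 * (function 4) device of each node, plus feature flags derived from
 * the CPU family.
 */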
int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

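	/* First pass: count misc devices to size the allocation. */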
	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}


	/* some CPU families (e.g. family 0x11) do not support GART */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

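/*
 * Return the 4-bit subcache enable mask for @cpu's compute unit, read
 * from the L3 partitioning register (offset 0x1d4) of the node's link
 * device. Returns 0 when L3 partitioning is not supported.
 */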
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;
	int cuid = 0;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

#ifdef CONFIG_SMP
	cuid = cpu_data(cpu).compute_unit_id;
#endif
	return (mask >> (4 * cuid)) & 0xf;
}

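/*
 * Program the subcache enable bits for @cpu's compute unit. BAN mode is
 * temporarily dropped while any subcaches are disabled and restored once
 * the register returns to its reset value.
 */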
int amd_set_subcaches(int cpu, int mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid = 0;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

#ifdef CONFIG_SMP
	cuid = cpu_data(cpu).compute_unit_id;
#endif
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

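/*
 * Cache each node's GART flush word (misc device offset 0x9c) so
 * amd_flush_garts() does not have to re-read it on every flush.
 */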
static int amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

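/*
 * Flush the GART TLB on every node by writing the cached flush word with
 * the flush bit (bit 0) set, then poll until the hardware clears it.
 */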
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/* Avoid races between AGP and IOMMU. In theory it's not needed
	   but I'm not sure if the hardware won't lose flush requests
	   when another is pending. This whole thing is so expensive anyways
	   that it doesn't matter to serialize more. -AK */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		printk(KERN_NOTICE "nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

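/* Boot-time setup: cache the northbridges and the GART flush words. */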
static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();

	if (err < 0)
		printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");

	if (amd_cache_gart() < 0)
		printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
		       "GART support disabled.\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);