/*  Generic MTRR (Memory Type Range Register) driver.

    Copyright (C) 1997-2000  Richard Gooch

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Library General Public
    License as published by the Free Software Foundation; either
    version 2 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    Library General Public License for more details.

    You should have received a copy of the GNU Library General Public
    License along with this library; if not, write to the Free
    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

    Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
    The postal address is:
      Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.

    Source: "Pentium Pro Family Developer's Manual, Volume 3:
    Operating System Writer's Guide" (Intel document number 242692),
    section 11.11.7

    ChangeLog

    Prehistory Martin Tischhäuser <martin@ikcbarka.fzk.de>
	       Initial register-setting code (from proform-1.0).
    19971216   Richard Gooch <rgooch@atnf.csiro.au>
               Original version for /proc/mtrr interface, SMP-safe.
  v1.0
    19971217   Richard Gooch <rgooch@atnf.csiro.au>
               Bug fix for ioctl()s.
	       Added sample code in Documentation/mtrr.txt
  v1.1
    19971218   Richard Gooch <rgooch@atnf.csiro.au>
               Disallow overlapping regions.
    19971219   Jens Maurer <jmaurer@menuett.rhein-main.de>
               Register-setting fixups.
  v1.2
    19971222   Richard Gooch <rgooch@atnf.csiro.au>
               Fixups for kernel 2.1.75.
  v1.3
    19971229   David Wragg <dpw@doc.ic.ac.uk>
               Register-setting fixups and conformity with Intel conventions.
    19971229   Richard Gooch <rgooch@atnf.csiro.au>
               Cosmetic changes and wrote this ChangeLog ;-)
    19980106   Richard Gooch <rgooch@atnf.csiro.au>
               Fixups for kernel 2.1.78.
  v1.4
    19980119   David Wragg <dpw@doc.ic.ac.uk>
               Included passive-release enable code (elsewhere in PCI setup).
  v1.5
    19980131   Richard Gooch <rgooch@atnf.csiro.au>
               Replaced global kernel lock with private spinlock.
  v1.6
    19980201   Richard Gooch <rgooch@atnf.csiro.au>
               Added wait for other CPUs to complete changes.
  v1.7
    19980202   Richard Gooch <rgooch@atnf.csiro.au>
               Bug fix in definition of <set_mtrr> for UP.
  v1.8
    19980319   Richard Gooch <rgooch@atnf.csiro.au>
               Fixups for kernel 2.1.90.
    19980323   Richard Gooch <rgooch@atnf.csiro.au>
               Move SMP BIOS fixup before secondary CPUs call <calibrate_delay>
  v1.9
    19980325   Richard Gooch <rgooch@atnf.csiro.au>
               Fixed test for overlapping regions: confused by adjacent regions
    19980326   Richard Gooch <rgooch@atnf.csiro.au>
               Added wbinvd in <set_mtrr_prepare>.
    19980401   Richard Gooch <rgooch@atnf.csiro.au>
               Bug fix for non-SMP compilation.
    19980418   David Wragg <dpw@doc.ic.ac.uk>
               Fixed-MTRR synchronisation for SMP and use atomic operations
	       instead of spinlocks.
    19980418   Richard Gooch <rgooch@atnf.csiro.au>
	       Differentiate different MTRR register classes for BIOS fixup.
  v1.10
    19980419   David Wragg <dpw@doc.ic.ac.uk>
	       Bug fix in variable MTRR synchronisation.
  v1.11
    19980419   Richard Gooch <rgooch@atnf.csiro.au>
	       Fixups for kernel 2.1.97.
  v1.12
    19980421   Richard Gooch <rgooch@atnf.csiro.au>
	       Safer synchronisation across CPUs when changing MTRRs.
  v1.13
    19980423   Richard Gooch <rgooch@atnf.csiro.au>
	       Bugfix for SMP systems without MTRR support.
  v1.14
    19980427   Richard Gooch <rgooch@atnf.csiro.au>
	       Trap calls to <mtrr_add> and <mtrr_del> on non-MTRR machines.
  v1.15
    19980427   Richard Gooch <rgooch@atnf.csiro.au>
	       Use atomic bitops for setting SMP change mask.
  v1.16
    19980428   Richard Gooch <rgooch@atnf.csiro.au>
	       Removed spurious diagnostic message.
  v1.17
    19980429   Richard Gooch <rgooch@atnf.csiro.au>
	       Moved register-setting macros into this file.
	       Moved setup code from init/main.c to i386-specific areas.
  v1.18
    19980502   Richard Gooch <rgooch@atnf.csiro.au>
	       Moved MTRR detection outside conditionals in <mtrr_init>.
  v1.19
    19980502   Richard Gooch <rgooch@atnf.csiro.au>
	       Documentation improvement: mention Pentium II and AGP.
  v1.20
    19980521   Richard Gooch <rgooch@atnf.csiro.au>
	       Only manipulate interrupt enable flag on local CPU.
	       Allow enclosed uncachable regions.
  v1.21
    19980611   Richard Gooch <rgooch@atnf.csiro.au>
	       Always define <main_lock>.
  v1.22
    19980901   Richard Gooch <rgooch@atnf.csiro.au>
	       Removed module support in order to tidy up code.
	       Added sanity check for <mtrr_add>/<mtrr_del> before <mtrr_init>.
	       Created addition queue for prior to SMP commence.
  v1.23
    19980902   Richard Gooch <rgooch@atnf.csiro.au>
	       Ported patch to kernel 2.1.120-pre3.
  v1.24
    19980910   Richard Gooch <rgooch@atnf.csiro.au>
	       Removed sanity checks and addition queue: Linus prefers an OOPS.
  v1.25
    19981001   Richard Gooch <rgooch@atnf.csiro.au>
	       Fixed harmless compiler warning in include/asm-i386/mtrr.h
	       Fixed version numbering and history for v1.23 -> v1.24.
  v1.26
    19990118   Richard Gooch <rgooch@atnf.csiro.au>
	       Added devfs support.
  v1.27
    19990123   Richard Gooch <rgooch@atnf.csiro.au>
	       Changed locking to spin with reschedule.
	       Made use of new <smp_call_function>.
  v1.28
    19990201   Zoltán Böszörményi <zboszor@mail.externet.hu>
	       Extended the driver to be able to use Cyrix style ARRs.
    19990204   Richard Gooch <rgooch@atnf.csiro.au>
	       Restructured Cyrix support.
  v1.29
    19990204   Zoltán Böszörményi <zboszor@mail.externet.hu>
	       Refined ARR support: enable MAPEN in set_mtrr_prepare()
	       and disable MAPEN in set_mtrr_done().
    19990205   Richard Gooch <rgooch@atnf.csiro.au>
	       Minor cleanups.
  v1.30
    19990208   Zoltán Böszörményi <zboszor@mail.externet.hu>
               Protect plain 6x86s (and other processors without the
               Page Global Enable feature) against accessing CR4 in
               set_mtrr_prepare() and set_mtrr_done().
    19990210   Richard Gooch <rgooch@atnf.csiro.au>
	       Turned <set_mtrr_up> and <get_mtrr> into function pointers.
  v1.31
    19990212   Zoltán Böszörményi <zboszor@mail.externet.hu>
               Major rewrite of cyrix_arr_init(): do not touch ARRs,
               leave them as the BIOS has set them up.
               Enable usage of all 8 ARRs.
               Avoid multiplications by 3 everywhere and other
               code clean ups/speed ups.
    19990213   Zoltán Böszörményi <zboszor@mail.externet.hu>
               Set up other Cyrix processors identical to the boot cpu.
               Since Cyrix doesn't support the Intel APIC, this is
               l'art pour l'art.
               Weigh ARRs by size:
               If size <= 32M is given, set up the ARR# we were given.
               If size >  32M is given, set up ARR7 only if it is free,
               fail otherwise.
    19990214   Zoltán Böszörményi <zboszor@mail.externet.hu>
               Also check for size >= 256K if we are to set up ARR7;
               mtrr_add() returns the value it gets from set_mtrr().
    19990218   Zoltán Böszörményi <zboszor@mail.externet.hu>
               Remove Cyrix "coma bug" workaround from here.
               Moved to linux/arch/i386/kernel/setup.c and
               linux/include/asm-i386/bugs.h
    19990228   Richard Gooch <rgooch@atnf.csiro.au>
	       Added MTRRIOC_KILL_ENTRY ioctl(2)
	       Trap for counter underflow in <mtrr_file_del>.
	       Trap for 4 MiB aligned regions for PPro, stepping <= 7.
    19990301   Richard Gooch <rgooch@atnf.csiro.au>
	       Created <get_free_region> hook.
    19990305   Richard Gooch <rgooch@atnf.csiro.au>
	       Temporarily disable AMD support now MTRR capability flag is set.
  v1.32
    19990308   Zoltán Böszörményi <zboszor@mail.externet.hu>
	       Adjust my changes (19990212-19990218) to Richard Gooch's
	       latest changes (19990228-19990305).
  v1.33
    19990309   Richard Gooch <rgooch@atnf.csiro.au>
	       Fixed typo in <printk> message.
    19990310   Richard Gooch <rgooch@atnf.csiro.au>
	       Support K6-II/III based on Alan Cox's <alan@redhat.com> patches.
  v1.34
    19990511   Bart Hartgers <bart@etpmod.phys.tue.nl>
	       Support Centaur C6 MCRs.
    19990512   Richard Gooch <rgooch@atnf.csiro.au>
	       Minor cleanups.
  v1.35
    19990707   Zoltán Böszörményi <zboszor@mail.externet.hu>
               Check whether ARR3 is protected in cyrix_get_free_region()
               and mtrr_del(). The code won't attempt to delete or change it
               from now on if the BIOS protected ARR3. It silently skips ARR3
               in cyrix_get_free_region() or returns with an error code from
               mtrr_del().
    19990711   Zoltán Böszörményi <zboszor@mail.externet.hu>
               Reset some bits in the CCRs in cyrix_arr_init() to disable SMM
               if ARR3 isn't protected. This is needed because if SMM is active
               and ARR3 isn't protected then deleting and setting ARR3 again
               may lock up the processor. With SMM entirely disabled, it does
               not happen.
    19990812   Zoltán Böszörményi <zboszor@mail.externet.hu>
               Rearrange switch() statements so the driver accommodates the
               fact that the AMD Athlon handles its MTRRs the same way
               as Intel does.
    19990814   Zoltán Böszörményi <zboszor@mail.externet.hu>
	       Double check for Intel in mtrr_add()'s big switch() because
	       that revision check is only valid for Intel CPUs.
    19990819   Alan Cox <alan@redhat.com>
               Tested Zoltan's changes on a pre-production Athlon - 100%
               success.
    19991008   Manfred Spraul <manfreds@colorfullife.com>
	       Replaced spin_lock_reschedule() with a normal semaphore.
  v1.36
    20000221   Richard Gooch <rgooch@atnf.csiro.au>
               Compile fix if procfs and devfs not enabled.
	       Formatting changes.
  v1.37
    20001109   H. Peter Anvin <hpa@zytor.com>
	       Use the new centralized CPU feature detects.

  v1.38
    20010309   Dave Jones <davej@suse.de>
	       Add support for Cyrix III.

  v1.39
    20010312   Dave Jones <davej@suse.de>
               Ugh, I broke AMD support.
	       Reworked fix by Troels Walsted Hansen <troels@thule.no>

  v1.40
    20010327   Dave Jones <davej@suse.de>
	       Adapted Cyrix III support to include VIA C3.

*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/timer.h>
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/ctype.h>
#include <linux/proc_fs.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#define MTRR_NEED_STRINGS
#include <asm/mtrr.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/segment.h>
#include <asm/bitops.h>
#include <asm/atomic.h>
#include <asm/msr.h>

#include <asm/hardirq.h>
#include <linux/irq.h>

#define MTRR_VERSION            "1.40 (20010327)"

#define TRUE  1
#define FALSE 0

/*
 * The code assumes all processors support the same MTRR
 * interface.  This is generally a good assumption, but could
 * potentially be a problem.
 */
enum mtrr_if_type {
    MTRR_IF_NONE,		/* No MTRRs supported */
    MTRR_IF_INTEL,		/* Intel (P6) standard MTRRs */
    MTRR_IF_AMD_K6,		/* AMD pre-Athlon MTRRs */
    MTRR_IF_CYRIX_ARR,		/* Cyrix ARRs */
    MTRR_IF_CENTAUR_MCR,	/* Centaur MCRs */
} mtrr_if = MTRR_IF_NONE;

static __initdata char *mtrr_if_name[] = {
    "none", "Intel", "AMD K6", "Cyrix ARR", "Centaur MCR"
};

#define MTRRcap_MSR     0x0fe
#define MTRRdefType_MSR 0x2ff

#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
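/* e.g. variable range 0 pairs MSRs 0x200 (base) and 0x201 (mask);
   range 7 pairs 0x20e and 0x20f. */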

#define NUM_FIXED_RANGES 88
#define MTRRfix64K_00000_MSR 0x250
#define MTRRfix16K_80000_MSR 0x258
#define MTRRfix16K_A0000_MSR 0x259
#define MTRRfix4K_C0000_MSR 0x268
#define MTRRfix4K_C8000_MSR 0x269
#define MTRRfix4K_D0000_MSR 0x26a
#define MTRRfix4K_D8000_MSR 0x26b
#define MTRRfix4K_E0000_MSR 0x26c
#define MTRRfix4K_E8000_MSR 0x26d
#define MTRRfix4K_F0000_MSR 0x26e
#define MTRRfix4K_F8000_MSR 0x26f
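/* The 88 entries in NUM_FIXED_RANGES come from these 11 MSRs, each
   holding 8 byte-sized type fields: the 64K MSR covers 0-512K in 64K
   steps, the two 16K MSRs cover 512K-768K and the eight 4K MSRs cover
   768K-1M. */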

#ifdef CONFIG_SMP
#  define MTRR_CHANGE_MASK_FIXED     0x01
#  define MTRR_CHANGE_MASK_VARIABLE  0x02
#  define MTRR_CHANGE_MASK_DEFTYPE   0x04
#endif

/* In the Intel processor's MTRR interface, the MTRR type is always held in
   an 8 bit field: */
typedef u8 mtrr_type;

#define LINE_SIZE      80
#define JIFFIE_TIMEOUT 100

#ifdef CONFIG_SMP
#  define set_mtrr(reg,base,size,type) set_mtrr_smp (reg, base, size, type)
#else
#  define set_mtrr(reg,base,size,type) (*set_mtrr_up) (reg, base, size, type, \
						       TRUE)
#endif

#if defined(CONFIG_PROC_FS) || defined(CONFIG_DEVFS_FS)
# define USERSPACE_INTERFACE
#endif

#ifndef USERSPACE_INTERFACE
#  define compute_ascii() while (0)
#endif

#ifdef USERSPACE_INTERFACE
static char *ascii_buffer;
static unsigned int ascii_buf_bytes;
#endif
static unsigned int *usage_table;
static DECLARE_MUTEX(main_lock);

/*  Private functions  */
#ifdef USERSPACE_INTERFACE
static void compute_ascii (void);
#endif


struct set_mtrr_context
{
    unsigned long flags;
    unsigned long deftype_lo;
    unsigned long deftype_hi;
    unsigned long cr4val;
    unsigned long ccr3;
};

static int arr3_protected;

/*  Put the processor into a state where MTRRs can be safely set  */
static void set_mtrr_prepare_save (struct set_mtrr_context *ctxt)
{
    /*  Disable interrupts locally  */
    __save_flags (ctxt->flags); __cli ();

    if ( mtrr_if != MTRR_IF_INTEL && mtrr_if != MTRR_IF_CYRIX_ARR )
	 return;

    /*  Save value of CR4 and clear Page Global Enable (bit 7). Clear only
	that bit: truncating CR4 to its low byte would also clobber PCE and
	the FXSR/SSE enable bits  */
    if ( test_bit(X86_FEATURE_PGE, &boot_cpu_data.x86_capability) ) {
	ctxt->cr4val = read_cr4();
	write_cr4(ctxt->cr4val & ~(1UL << 7));
    }

    /*  Disable and flush caches. Note that wbinvd flushes the TLBs as
	a side-effect  */
    {
	unsigned int cr0 = read_cr0() | 0x40000000;
	wbinvd();
	write_cr0( cr0 );
	wbinvd();
    }

    if ( mtrr_if == MTRR_IF_INTEL ) {
	/*  Save MTRR state */
	rdmsr (MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
    } else {
	/* Cyrix ARRs - everything else was excluded at the top */
	ctxt->ccr3 = getCx86 (CX86_CCR3);
    }
}   /*  End Function set_mtrr_prepare_save  */

static void set_mtrr_disable (struct set_mtrr_context *ctxt)
{
    if ( mtrr_if != MTRR_IF_INTEL && mtrr_if != MTRR_IF_CYRIX_ARR )
	 return;

    if ( mtrr_if == MTRR_IF_INTEL ) {
	/*  Disable MTRRs, and set the default type to uncached  */
	wrmsr (MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL, ctxt->deftype_hi);
    } else {
	/* Cyrix ARRs - everything else was excluded at the top */
	setCx86 (CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10);
    }
}   /*  End Function set_mtrr_disable  */

/*  Restore the processor after a set_mtrr_prepare  */
static void set_mtrr_done (struct set_mtrr_context *ctxt)
{
    if ( mtrr_if != MTRR_IF_INTEL && mtrr_if != MTRR_IF_CYRIX_ARR ) {
	 __restore_flags (ctxt->flags);
	 return;
    }

    /*  Flush caches and TLBs  */
    wbinvd();

    /*  Restore MTRRdefType  */
    if ( mtrr_if == MTRR_IF_INTEL ) {
	/* Intel (P6) standard MTRRs */
	wrmsr (MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
    } else {
	/* Cyrix ARRs - everything else was excluded at the top */
	setCx86 (CX86_CCR3, ctxt->ccr3);
    }

    /*  Enable caches  */
    write_cr0( read_cr0() & 0xbfffffff );

    /*  Restore value of CR4  */
    if ( test_bit(X86_FEATURE_PGE, &boot_cpu_data.x86_capability) )
	write_cr4(ctxt->cr4val);

    /*  Re-enable interrupts locally (if enabled previously)  */
    __restore_flags (ctxt->flags);
}   /*  End Function set_mtrr_done  */

/*  This function returns the number of variable MTRRs  */
static unsigned int get_num_var_ranges (void)
{
    unsigned long config, dummy;

    switch ( mtrr_if )
    {
    case MTRR_IF_INTEL:
	rdmsr (MTRRcap_MSR, config, dummy);
	return (config & 0xff);
    case MTRR_IF_AMD_K6:
	return 2;
    case MTRR_IF_CYRIX_ARR:
	return 8;
    case MTRR_IF_CENTAUR_MCR:
	return 8;
    default:
	return 0;
    }
}   /*  End Function get_num_var_ranges  */
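/* On Intel, MTRRcap[7:0] holds the count; P6-family processors
   report 8 variable ranges there. */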

/*  Returns non-zero if we have the write-combining memory type  */
static int have_wrcomb (void)
{
    unsigned long config, dummy;
    struct pci_dev *dev = NULL;

    /* ServerWorks LE chipsets have problems with write-combining.
       Don't allow it and leave room for other chipsets to be tagged */
    if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) {
	if ((dev->vendor == PCI_VENDOR_ID_SERVERWORKS) &&
	    (dev->device == PCI_DEVICE_ID_SERVERWORKS_LE)) {
	    printk (KERN_INFO "mtrr: Serverworks LE detected. Write-combining disabled.\n");
	    return 0;
	}
    }

    /* Intel 450NX errata # 23. Non-ascending cacheline evictions to
       write-combining memory may result in data corruption */
    dev = pci_find_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82451NX, NULL);
    if (dev)
    {
	printk (KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
	return 0;
    }

    switch ( mtrr_if )
    {
    case MTRR_IF_INTEL:
	/* MTRRcap bit 10 is the write-combining type support flag */
	rdmsr (MTRRcap_MSR, config, dummy);
	return (config & (1<<10));
    case MTRR_IF_AMD_K6:
    case MTRR_IF_CENTAUR_MCR:
    case MTRR_IF_CYRIX_ARR:
	return 1;
    default:
	return 0;
    }
}   /*  End Function have_wrcomb  */

static u32 size_or_mask, size_and_mask;

static void intel_get_mtrr (unsigned int reg, unsigned long *base,
			    unsigned long *size, mtrr_type *type)
{
    unsigned long mask_lo, mask_hi, base_lo, base_hi;

    rdmsr (MTRRphysMask_MSR(reg), mask_lo, mask_hi);
    if ( (mask_lo & 0x800) == 0 )
    {
	/*  Invalid (i.e. free) range  */
	*base = 0;
	*size = 0;
	*type = 0;
	return;
    }

    rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

    /* Work out the shifted address mask. */
    mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
		| mask_lo >> PAGE_SHIFT;

    /* This works correctly if size is a power of two, i.e. a
       contiguous range. */
    *size = -mask_lo;
    *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
    *type = base_lo & 0xff;
}   /*  End Function intel_get_mtrr  */
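/* Worked example (a sketch, assuming PAGE_SHIFT == 12 and the 36-bit
   defaults size_or_mask == 0xff000000, size_and_mask == 0x00f00000):
   a 4 MiB write-combining range at 0xe8000000 has PhysBase =
   0x00000000:0xe8000001 and PhysMask = 0x0000000f:0xffc00800.
   Decoding: mask_lo = 0xff000000 | 0xf << 20 | 0xffc00800 >> 12
   = 0xfffffc00, so *size = -0xfffffc00 = 0x400 pages = 4 MiB,
   *base = 0xe8000 pages and *type = 0x01 (MTRR_TYPE_WRCOMB). */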

static void cyrix_get_arr (unsigned int reg, unsigned long *base,
			   unsigned long *size, mtrr_type *type)
{
    unsigned long flags;
    unsigned char arr, ccr3, rcr, shift;

    arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */

    /* Save flags and disable interrupts */
    __save_flags (flags); __cli ();

    ccr3 = getCx86 (CX86_CCR3);
    setCx86 (CX86_CCR3, (ccr3 & 0x0f) | 0x10);		/* enable MAPEN */
    ((unsigned char *) base)[3]  = getCx86 (arr);
    ((unsigned char *) base)[2]  = getCx86 (arr+1);
    ((unsigned char *) base)[1]  = getCx86 (arr+2);
    rcr = getCx86(CX86_RCR_BASE + reg);
    setCx86 (CX86_CCR3, ccr3);				/* disable MAPEN */

    /* Re-enable interrupts if they were enabled previously */
    __restore_flags (flags);
    shift = ((unsigned char *) base)[1] & 0x0f;
    *base >>= PAGE_SHIFT;

    /* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
     * Note: shift==0xf means 4G, this is unsupported.
     */
    if (shift)
      *size = (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
    else
      *size = 0;

    /* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */
    if (reg < 7)
    {
	switch (rcr)
	{
	  case  1: *type = MTRR_TYPE_UNCACHABLE; break;
	  case  8: *type = MTRR_TYPE_WRBACK;     break;
	  case  9: *type = MTRR_TYPE_WRCOMB;     break;
	  case 24:
	  default: *type = MTRR_TYPE_WRTHROUGH;  break;
	}
    } else
    {
	switch (rcr)
	{
	  case  0: *type = MTRR_TYPE_UNCACHABLE; break;
	  case  8: *type = MTRR_TYPE_WRCOMB;     break;
	  case  9: *type = MTRR_TYPE_WRBACK;     break;
	  case 25:
	  default: *type = MTRR_TYPE_WRTHROUGH;  break;
	}
    }
}   /*  End Function cyrix_get_arr  */

static void amd_get_mtrr (unsigned int reg, unsigned long *base,
			  unsigned long *size, mtrr_type *type)
{
    unsigned long low, high;

    rdmsr (MSR_K6_UWCCR, low, high);
    /*  Upper dword is region 1, lower is region 0  */
    if (reg == 1) low = high;
    /*  The base is in bits 31:17, i.e. already 128K aligned  */
    *base = (low & 0xFFFE0000) >> PAGE_SHIFT;
    *type = 0;
    if (low & 1) *type = MTRR_TYPE_UNCACHABLE;
    if (low & 2) *type = MTRR_TYPE_WRCOMB;
    if ( !(low & 3) )
    {
	*size = 0;
	return;
    }
    /*
     *	This needs a little explaining. The size is stored as an
     *	inverted mask of bits of 128K granularity, 15 bits long, offset
     *	by 2 bits.
     *
     *	So to get a size we invert the mask and add 1 to the lowest
     *	mask bit (4, as it is 2 bits in). This gives us a size we then
     *	shift to turn into 128K blocks.
     *
     *	eg		111 1111 1111 1100      is 512K
     *
     *	invert		000 0000 0000 0011
     *	+1		000 0000 0000 0100
     *	*128K	...
     */
    low = (~low) & 0x1FFFC;
    *size = (low + 4) << (15 - PAGE_SHIFT);
    return;
}   /*  End Function amd_get_mtrr  */

static struct
{
    unsigned long high;
    unsigned long low;
} centaur_mcr[8];

static u8 centaur_mcr_reserved;
static u8 centaur_mcr_type;		/* 0 for winchip, 1 for winchip2 */

/*
 *	Report boot time MCR setups
 */

void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
{
	centaur_mcr[mcr].low = lo;
	centaur_mcr[mcr].high = hi;
}

static void centaur_get_mcr (unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
    *base = centaur_mcr[reg].high >> PAGE_SHIFT;
    *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT;
    *type = MTRR_TYPE_WRCOMB;	/*  If it is there, it is write-combining  */
    if(centaur_mcr_type==1 && ((centaur_mcr[reg].low&31)&2))
    	*type = MTRR_TYPE_UNCACHABLE;
    if(centaur_mcr_type==1 && (centaur_mcr[reg].low&31)==25)
    	*type = MTRR_TYPE_WRBACK;
    if(centaur_mcr_type==0 && (centaur_mcr[reg].low&31)==31)
    	*type = MTRR_TYPE_WRBACK;

}   /*  End Function centaur_get_mcr  */

static void (*get_mtrr) (unsigned int reg, unsigned long *base,
			 unsigned long *size, mtrr_type *type);

static void intel_set_mtrr_up (unsigned int reg, unsigned long base,
			       unsigned long size, mtrr_type type, int do_safe)
/*  [SUMMARY] Set variable MTRR register on the local CPU.
    <reg> The register to set.
    <base> The base address of the region.
    <size> The size of the region. If this is 0 the region is disabled.
    <type> The type of the region.
    <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
    be done externally.
    [RETURNS] Nothing.
*/
{
    struct set_mtrr_context ctxt;

    if (do_safe) {
	set_mtrr_prepare_save (&ctxt);
	set_mtrr_disable (&ctxt);
    }
    if (size == 0)
    {
	/* The invalid bit is kept in the mask, so we simply clear the
	   relevant mask register to disable a range. */
	wrmsr (MTRRphysMask_MSR (reg), 0, 0);
    }
    else
    {
	wrmsr (MTRRphysBase_MSR (reg), base << PAGE_SHIFT | type,
		(base & size_and_mask) >> (32 - PAGE_SHIFT));
	wrmsr (MTRRphysMask_MSR (reg), -size << PAGE_SHIFT | 0x800,
		(-size & size_and_mask) >> (32 - PAGE_SHIFT));
    }
    if (do_safe) set_mtrr_done (&ctxt);
}   /*  End Function intel_set_mtrr_up  */
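/* Encoding sketch (assuming PAGE_SHIFT == 12): for the 4 MiB example
   above, base = 0xe8000 and size = 0x400 pages, so PhysBase is written
   as 0xe8000 << 12 | type and the low word of PhysMask as
   -0x400 << 12 | 0x800 (the valid bit) = 0xffc00800, with the upper
   address bits of -size supplied via size_and_mask in the high word. */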

static void cyrix_set_arr_up (unsigned int reg, unsigned long base,
			      unsigned long size, mtrr_type type, int do_safe)
{
    struct set_mtrr_context ctxt;
    unsigned char arr, arr_type, arr_size;

    arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */

    /* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */
    if (reg >= 7)
	size >>= 6;

    size &= 0x7fff; /* make sure arr_size <= 14 */
    for (arr_size = 0; size; arr_size++, size >>= 1);

    if (reg < 7)
    {
	switch (type) {
	  case MTRR_TYPE_UNCACHABLE:	arr_type =  1; break;
	  case MTRR_TYPE_WRCOMB:	arr_type =  9; break;
	  case MTRR_TYPE_WRTHROUGH:	arr_type = 24; break;
	  default:			arr_type =  8; break;
	}
    }
    else
    {
	switch (type)
	{
	  case MTRR_TYPE_UNCACHABLE:	arr_type =  0; break;
	  case MTRR_TYPE_WRCOMB:	arr_type =  8; break;
	  case MTRR_TYPE_WRTHROUGH:	arr_type = 25; break;
	  default:			arr_type =  9; break;
	}
    }

    if (do_safe) {
	set_mtrr_prepare_save (&ctxt);
	set_mtrr_disable (&ctxt);
    }
    base <<= PAGE_SHIFT;
    setCx86(arr,    ((unsigned char *) &base)[3]);
    setCx86(arr+1,  ((unsigned char *) &base)[2]);
    setCx86(arr+2, (((unsigned char *) &base)[1]) | arr_size);
    setCx86(CX86_RCR_BASE + reg, arr_type);
    if (do_safe) set_mtrr_done (&ctxt);
}   /*  End Function cyrix_set_arr_up  */
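/* Size encoding sketch: the loop above stores floor(log2(size))+1 in
   the low nibble of the third ARR byte.  An 8 MiB region on ARR0-ARR6
   is 0x800 pages, so arr_size = 12 and cyrix_get_arr() decodes it back
   as 1 << (12-1) = 0x800 pages; on ARR7 the size is first divided by
   64, matching the 0x40UL scaling in the decode. */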

static void amd_set_mtrr_up (unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type, int do_safe)
/*  [SUMMARY] Set variable MTRR register on the local CPU.
    <reg> The register to set.
    <base> The base address of the region.
    <size> The size of the region. If this is 0 the region is disabled.
    <type> The type of the region.
    <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
    be done externally.
    [RETURNS] Nothing.
*/
{
    u32 regs[2];
    struct set_mtrr_context ctxt;

    if (do_safe) {
	set_mtrr_prepare_save (&ctxt);
	set_mtrr_disable (&ctxt);
    }
    /*
     *	Low is MTRR0, high is MTRR1
     */
    rdmsr (MSR_K6_UWCCR, regs[0], regs[1]);
    /*
     *	Blank to disable
     */
    if (size == 0)
	regs[reg] = 0;
    else
	/* Set the register to the base, the type (off by one) and an
	   inverted bitmask of the size. The size is the only odd
	   bit. We are fed, say, 512K. We invert this and we get 111 1111
	   1111 1011, but if you subtract one and invert you get the
	   desired 111 1111 1111 1100 mask.

	   But ~(x - 1) == ~x + 1 == -x. Two's complement rocks!  */
	regs[reg] = (-size>>(15-PAGE_SHIFT) & 0x0001FFFC)
				| (base<<PAGE_SHIFT) | (type+1);

    /*
     *	The writeback rule is quite specific. See the manual. It is:
     *	disable local interrupts, write back the cache, set the MTRR.
     */
    wbinvd();
    wrmsr (MSR_K6_UWCCR, regs[0], regs[1]);
    if (do_safe) set_mtrr_done (&ctxt);
}   /*  End Function amd_set_mtrr_up  */


static void centaur_set_mcr_up (unsigned int reg, unsigned long base,
				unsigned long size, mtrr_type type,
				int do_safe)
{
    struct set_mtrr_context ctxt;
    unsigned long low, high;

    if (do_safe) {
	set_mtrr_prepare_save (&ctxt);
	set_mtrr_disable (&ctxt);
    }
    if (size == 0)
    {
        /*  Disable  */
        high = low = 0;
    }
    else
    {
	high = base << PAGE_SHIFT;
	if(centaur_mcr_type == 0)
		low = -size << PAGE_SHIFT | 0x1f; /* only support write-combining... */
	else
	{
		if(type == MTRR_TYPE_UNCACHABLE)
			low = -size << PAGE_SHIFT | 0x02;	/* NC */
		else
			low = -size << PAGE_SHIFT | 0x09;	/* WWO,WC */
	}
    }
    centaur_mcr[reg].high = high;
    centaur_mcr[reg].low = low;
    wrmsr (MSR_IDT_MCR0 + reg, low, high);
    if (do_safe) set_mtrr_done( &ctxt );
}   /*  End Function centaur_set_mcr_up  */

static void (*set_mtrr_up) (unsigned int reg, unsigned long base,
			    unsigned long size, mtrr_type type,
			    int do_safe);

#ifdef CONFIG_SMP

struct mtrr_var_range
{
    unsigned long base_lo;
    unsigned long base_hi;
    unsigned long mask_lo;
    unsigned long mask_hi;
};


/*  Get the MSR pair relating to a var range  */
static void __init get_mtrr_var_range (unsigned int index,
				       struct mtrr_var_range *vr)
{
    rdmsr (MTRRphysBase_MSR (index), vr->base_lo, vr->base_hi);
    rdmsr (MTRRphysMask_MSR (index), vr->mask_lo, vr->mask_hi);
}   /*  End Function get_mtrr_var_range  */


/*  Set the MSR pair relating to a var range. Returns TRUE if
    changes are made  */
static int __init set_mtrr_var_range_testing (unsigned int index,
					      struct mtrr_var_range *vr)
{
    unsigned int lo, hi;
    int changed = FALSE;

    rdmsr(MTRRphysBase_MSR(index), lo, hi);
    if ( (vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	 || (vr->base_hi & 0xfUL) != (hi & 0xfUL) )
    {
	wrmsr (MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	changed = TRUE;
    }

    rdmsr (MTRRphysMask_MSR(index), lo, hi);

    if ( (vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	 || (vr->mask_hi & 0xfUL) != (hi & 0xfUL) )
    {
	wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
	changed = TRUE;
    }
    return changed;
}   /*  End Function set_mtrr_var_range_testing  */

static void __init get_fixed_ranges(mtrr_type *frs)
{
    unsigned long *p = (unsigned long *)frs;
    int i;
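    /* frs holds NUM_FIXED_RANGES (88) bytes, viewed here as 22 32-bit
       words: p[0..1] are the 64K MSR, p[2..5] the two 16K MSRs and
       p[6..21] the eight 4K MSRs. */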

    rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

    for (i = 0; i < 2; i++)
	rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i*2], p[3 + i*2]);
    for (i = 0; i < 8; i++)
	rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i*2], p[7 + i*2]);
}   /*  End Function get_fixed_ranges  */

static int __init set_fixed_ranges_testing(mtrr_type *frs)
{
    unsigned long *p = (unsigned long *)frs;
    int changed = FALSE;
    int i;
    unsigned long lo, hi;

    rdmsr(MTRRfix64K_00000_MSR, lo, hi);
    if (p[0] != lo || p[1] != hi)
    {
	wrmsr (MTRRfix64K_00000_MSR, p[0], p[1]);
	changed = TRUE;
    }

    for (i = 0; i < 2; i++)
    {
	rdmsr (MTRRfix16K_80000_MSR + i, lo, hi);
	if (p[2 + i*2] != lo || p[3 + i*2] != hi)
	{
	    wrmsr (MTRRfix16K_80000_MSR + i, p[2 + i*2], p[3 + i*2]);
	    changed = TRUE;
	}
    }

    for (i = 0; i < 8; i++)
    {
	rdmsr (MTRRfix4K_C0000_MSR + i, lo, hi);
	if (p[6 + i*2] != lo || p[7 + i*2] != hi)
	{
	    wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i*2], p[7 + i*2]);
	    changed = TRUE;
	}
    }
    return changed;
}   /*  End Function set_fixed_ranges_testing  */

struct mtrr_state
{
    unsigned int num_var_ranges;
    struct mtrr_var_range *var_ranges;
    mtrr_type fixed_ranges[NUM_FIXED_RANGES];
    unsigned char enabled;
    mtrr_type def_type;
};


/*  Grab all of the MTRR state for this CPU into *state  */
static void __init get_mtrr_state(struct mtrr_state *state)
{
    unsigned int nvrs, i;
    struct mtrr_var_range *vrs;
    unsigned long lo, dummy;

    nvrs = state->num_var_ranges = get_num_var_ranges();
    vrs = state->var_ranges
              = kmalloc (nvrs * sizeof (struct mtrr_var_range), GFP_KERNEL);
    if (vrs == NULL)
	nvrs = state->num_var_ranges = 0;

    for (i = 0; i < nvrs; i++)
	get_mtrr_var_range (i, &vrs[i]);
    get_fixed_ranges (state->fixed_ranges);

    rdmsr (MTRRdefType_MSR, lo, dummy);
    state->def_type = (lo & 0xff);
    state->enabled = (lo & 0xc00) >> 10;
}   /*  End Function get_mtrr_state  */


/*  Free resources associated with a struct mtrr_state  */
static void __init finalize_mtrr_state(struct mtrr_state *state)
{
    if (state->var_ranges) kfree (state->var_ranges);
}   /*  End Function finalize_mtrr_state  */


static unsigned long __init set_mtrr_state (struct mtrr_state *state,
					    struct set_mtrr_context *ctxt)
/*  [SUMMARY] Set the MTRR state for this CPU.
    <state> The MTRR state information to read.
    <ctxt> Some relevant CPU context.
    [NOTE] The CPU must already be in a safe state for MTRR changes.
    [RETURNS] 0 if no changes made, else a mask indicating what was changed.
*/
{
    unsigned int i;
    unsigned long change_mask = 0;

    for (i = 0; i < state->num_var_ranges; i++)
	if ( set_mtrr_var_range_testing (i, &state->var_ranges[i]) )
	    change_mask |= MTRR_CHANGE_MASK_VARIABLE;

    if ( set_fixed_ranges_testing(state->fixed_ranges) )
	change_mask |= MTRR_CHANGE_MASK_FIXED;
    /*  set_mtrr_done restores the old value of MTRRdefType,
	so to set it we fiddle with the saved value  */
    if ( (ctxt->deftype_lo & 0xff) != state->def_type
	 || ( (ctxt->deftype_lo & 0xc00) >> 10 ) != state->enabled)
    {
	ctxt->deftype_lo |= (state->def_type | state->enabled << 10);
	change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
    }

    return change_mask;
}   /*  End Function set_mtrr_state  */


static atomic_t undone_count;
static volatile int wait_barrier_mtrr_disable = FALSE;
static volatile int wait_barrier_execute = FALSE;
static volatile int wait_barrier_cache_enable = FALSE;

struct set_mtrr_data
{
    unsigned long smp_base;
    unsigned long smp_size;
    unsigned int smp_reg;
    mtrr_type smp_type;
};
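
/*
 * The MTRR update runs in lockstep: every CPU first flushes and
 * disables its cache (set_mtrr_prepare_save), then disables the MTRRs
 * (set_mtrr_disable), then writes the new register, and finally
 * restores state (set_mtrr_done).  The master drives the three
 * wait_barrier_* flags above and uses undone_count to wait for all
 * other CPUs to check in at each step.
 */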

static void ipi_handler (void *info)
/*  [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
    [RETURNS] Nothing.
*/
{
    struct set_mtrr_data *data = info;
    struct set_mtrr_context ctxt;
    set_mtrr_prepare_save (&ctxt);
    /*  Notify master that I've flushed and disabled my cache  */
    atomic_dec (&undone_count);
    while (wait_barrier_mtrr_disable) { rep_nop(); barrier(); }
    set_mtrr_disable (&ctxt);
    /*  Notify master that I've disabled the MTRRs  */
    atomic_dec (&undone_count);
    while (wait_barrier_execute) { rep_nop(); barrier(); }
    /*  The master has cleared me to execute  */
    (*set_mtrr_up) (data->smp_reg, data->smp_base, data->smp_size,
		    data->smp_type, FALSE);
    /*  Notify master CPU that I've executed the function  */
    atomic_dec (&undone_count);
    /*  Wait for master to clear me to enable cache and return  */
    while (wait_barrier_cache_enable) { rep_nop(); barrier(); }
    set_mtrr_done (&ctxt);
}   /*  End Function ipi_handler  */

static void set_mtrr_smp (unsigned int reg, unsigned long base,
			  unsigned long size, mtrr_type type)
{
    struct set_mtrr_data data;
    struct set_mtrr_context ctxt;

    data.smp_reg = reg;
    data.smp_base = base;
    data.smp_size = size;
    data.smp_type = type;
    wait_barrier_mtrr_disable = TRUE;
    wait_barrier_execute = TRUE;
    wait_barrier_cache_enable = TRUE;
    atomic_set (&undone_count, smp_num_cpus - 1);
    /*  Start the ball rolling on other CPUs  */
    if (smp_call_function (ipi_handler, &data, 1, 0) != 0)
	panic ("mtrr: timed out waiting for other CPUs\n");
    /*  Flush and disable the local CPU's cache  */
    set_mtrr_prepare_save (&ctxt);
    /*  Wait for all other CPUs to flush and disable their caches  */
    while (atomic_read (&undone_count) > 0) { rep_nop(); barrier(); }
    /*  Set up for completion wait and then release other CPUs to disable
	their MTRRs  */
    atomic_set (&undone_count, smp_num_cpus - 1);
    wait_barrier_mtrr_disable = FALSE;
    set_mtrr_disable (&ctxt);

    /*  Wait for all other CPUs to disable their MTRRs  */
    while (atomic_read (&undone_count) > 0) { rep_nop(); barrier(); }
    /*  Set up for completion wait and then release other CPUs to change
	their MTRRs  */
    atomic_set (&undone_count, smp_num_cpus - 1);
    wait_barrier_execute = FALSE;
    (*set_mtrr_up) (reg, base, size, type, FALSE);
    /*  Now wait for other CPUs to complete the function  */
    while (atomic_read (&undone_count) > 0) { rep_nop(); barrier(); }
    /*  Now all CPUs should have finished the function. Release the barrier to
	allow them to re-enable their caches and return from their interrupt,
	then enable the local cache and return  */
    wait_barrier_cache_enable = FALSE;
    set_mtrr_done (&ctxt);
}   /*  End Function set_mtrr_smp  */


/*  Some BIOSes are broken and don't set all MTRRs the same!  */
static void __init mtrr_state_warn(unsigned long mask)
{
    if (!mask) return;
    if (mask & MTRR_CHANGE_MASK_FIXED)
	printk ("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
    if (mask & MTRR_CHANGE_MASK_VARIABLE)
	printk ("mtrr: your CPUs had inconsistent variable MTRR settings\n");
    if (mask & MTRR_CHANGE_MASK_DEFTYPE)
	printk ("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
    printk ("mtrr: probably your BIOS does not set up all CPUs\n");
}   /*  End Function mtrr_state_warn  */

#endif  /*  CONFIG_SMP  */

static char *attrib_to_str (int x)
{
    return (x <= 6) ? mtrr_strings[x] : "?";
}   /*  End Function attrib_to_str  */

static void init_table (void)
{
    int i, max;

    max = get_num_var_ranges ();
    if ( ( usage_table = kmalloc (max * sizeof *usage_table, GFP_KERNEL) )
	 == NULL )
    {
	printk ("mtrr: could not allocate\n");
	return;
    }
    for (i = 0; i < max; i++) usage_table[i] = 1;
#ifdef USERSPACE_INTERFACE
    if ( ( ascii_buffer = kmalloc (max * LINE_SIZE, GFP_KERNEL) ) == NULL )
    {
	printk ("mtrr: could not allocate\n");
	return;
    }
    ascii_buf_bytes = 0;
    compute_ascii ();
#endif
}   /*  End Function init_table  */

static int generic_get_free_region (unsigned long base, unsigned long size)
/*  [SUMMARY] Get a free MTRR.
    <base> The starting (base) address of the region.
    <size> The size (in pages) of the region.
    [RETURNS] The index of the region on success, else a negative error code.
*/
{
    int i, max;
    mtrr_type ltype;
    unsigned long lbase, lsize;

    max = get_num_var_ranges ();
    for (i = 0; i < max; ++i)
    {
	(*get_mtrr) (i, &lbase, &lsize, &ltype);
	if (lsize == 0) return i;
    }
    return -ENOSPC;
}   /*  End Function generic_get_free_region  */

static int centaur_get_free_region (unsigned long base, unsigned long size)
/*  [SUMMARY] Get a free MCR.
    <base> The starting (base) address of the region.
    <size> The size (in pages) of the region.
    [RETURNS] The index of the region on success, else a negative error code.
*/
{
    int i, max;
    mtrr_type ltype;
    unsigned long lbase, lsize;

    max = get_num_var_ranges ();
    for (i = 0; i < max; ++i)
    {
    	if (centaur_mcr_reserved & (1<<i))
    		continue;
	(*get_mtrr) (i, &lbase, &lsize, &ltype);
	if (lsize == 0) return i;
    }
    return -ENOSPC;
}   /*  End Function centaur_get_free_region  */

static int cyrix_get_free_region (unsigned long base, unsigned long size)
/*  [SUMMARY] Get a free ARR.
    <base> The starting (base) address of the region.
    <size> The size (in pages) of the region.
    [RETURNS] The index of the region on success, else a negative error code.
*/
{
    int i;
    mtrr_type ltype;
    unsigned long lbase, lsize;

    /* If we are to set up a region >32M then look at ARR7 immediately */
    if (size > 0x2000)
    {
	cyrix_get_arr (7, &lbase, &lsize, &ltype);
	if (lsize == 0) return 7;
	/*  Else try ARR0-ARR6 first  */
    }
    else
    {
	for (i = 0; i < 7; i++)
	{
	    cyrix_get_arr (i, &lbase, &lsize, &ltype);
	    if ((i == 3) && arr3_protected) continue;
	    if (lsize == 0) return i;
	}
	/* ARR0-ARR6 aren't free; try ARR7, but its size must be at least 256K */
	cyrix_get_arr (i, &lbase, &lsize, &ltype);
	if ((lsize == 0) && (size >= 0x40)) return i;
    }
    return -ENOSPC;
}   /*  End Function cyrix_get_free_region  */

static int (*get_free_region) (unsigned long base,
			       unsigned long size) = generic_get_free_region;

/**
 *	mtrr_add_page - Add a memory type region
 *	@base: Physical base address of region in pages (4 KB)
 *	@size: Physical size of region in pages (4 KB)
 *	@type: Type of MTRR desired
 *	@increment: If this is true do usage counting on the region
 *
 *	Memory type region registers control the caching on newer Intel and
 *	non Intel processors. This function allows drivers to request that an
 *	MTRR be added. The details and hardware specifics of each processor's
 *	implementation are hidden from the caller, but nevertheless the
 *	caller should expect to need to provide a power of two size on an
 *	equivalent power of two boundary.
 *
 *	If the region cannot be added, either because all regions are in use
 *	or the CPU cannot support it, a negative value is returned. On success
 *	the register number for this entry is returned, but should be treated
 *	as a cookie only.
 *
 *	On a multiprocessor machine the changes are made to all processors.
 *	This is required on x86 by the Intel processors.
 *
 *	The available types are
 *
 *	%MTRR_TYPE_UNCACHABLE	-	No caching
 *
 *	%MTRR_TYPE_WRBACK	-	Write data back in bursts whenever
 *
 *	%MTRR_TYPE_WRCOMB	-	Write data back soon but allow bursts
 *
 *	%MTRR_TYPE_WRTHROUGH	-	Cache reads but not writes
 *
 *	BUGS: Needs a quiet flag for the cases where drivers do not mind
 *	failures and do not wish system log messages to be sent.
 */

int mtrr_add_page(unsigned long base, unsigned long size, unsigned int type, char increment)
{
/*  [SUMMARY] Add an MTRR entry.
    <base> The starting (base, in pages) address of the region.
    <size> The size of the region (in pages).
    <type> The type of the new region.
    <increment> If true and the region already exists, the usage count will be
    incremented.
    [RETURNS] The MTRR register on success, else a negative number indicating
    the error code.
    [NOTE] This routine uses a semaphore.
*/
    int i, max;
    mtrr_type ltype;
    unsigned long lbase, lsize, last;

    switch ( mtrr_if )
    {
    case MTRR_IF_NONE:
	return -ENXIO;		/* No MTRRs whatsoever */

    case MTRR_IF_AMD_K6:
	/* Apply the K6 block alignment and size rules
	   In order
	   o Uncached or gathering only
	   o 128K or bigger block
	   o Power of 2 block
	   o base suitably aligned to the power
	*/
	if ( type > MTRR_TYPE_WRCOMB || size < (1 << (17-PAGE_SHIFT)) ||
	     (size & ~(size-1))-size || ( base & (size-1) ) )
	    return -EINVAL;
	break;

    case MTRR_IF_INTEL:
	/*  For Intel PPro stepping <= 7, must be 4 MiB aligned
	    and not touch 0x70000000->0x7003FFFF */
	if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	     boot_cpu_data.x86 == 6 &&
	     boot_cpu_data.x86_model == 1 &&
	     boot_cpu_data.x86_mask <= 7 )
	{
	    if ( base & ((1 << (22-PAGE_SHIFT))-1) )
	    {
		printk (KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
		return -EINVAL;
	    }
	    /* base and size are in pages, so compare against the page
	       numbers of the forbidden byte range */
	    if (!(base + size < 0x70000 || base > 0x7003F) &&
		 (type == MTRR_TYPE_WRCOMB || type == MTRR_TYPE_WRBACK))
	    {
		printk (KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
	        return -EINVAL;
	    }
	}
	/* Fall through */

    case MTRR_IF_CYRIX_ARR:
    case MTRR_IF_CENTAUR_MCR:
        if ( mtrr_if == MTRR_IF_CENTAUR_MCR )
	{
	    /*
	     *	FIXME: Winchip2 supports uncached
	     */
	    if (type != MTRR_TYPE_WRCOMB && (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE))
	    {
		printk (KERN_WARNING "mtrr: only write-combining%s supported\n",
			centaur_mcr_type ? " and uncacheable are" : " is");
		return -EINVAL;
	    }
	}
	else if (base + size < 0x100)
	{
	    printk (KERN_WARNING "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
		    base, size);
	    return -EINVAL;
	}
	/*  Check upper bits of base and last are equal and lower bits are 0
	    for base and 1 for last  */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1);
	if (lbase != last)
	{
	    printk (KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
		    base, size);
	    return -EINVAL;
	}
	break;

    default:
	return -EINVAL;
    }

    if (type >= MTRR_NUM_TYPES)
    {
	printk ("mtrr: type: %u illegal\n", type);
	return -EINVAL;
    }

    /*  If the type is WC, check that this processor supports it  */
    if ( (type == MTRR_TYPE_WRCOMB) && !have_wrcomb () )
    {
        printk (KERN_WARNING "mtrr: your processor doesn't support write-combining\n");
        return -ENOSYS;
    }

    if ( base & size_or_mask || size & size_or_mask )
    {
	printk ("mtrr: base or size exceeds the MTRR width\n");
	return -EINVAL;
    }

    increment = increment ? 1 : 0;
    max = get_num_var_ranges ();
    /*  Search for an existing MTRR  */
    down(&main_lock);
    for (i = 0; i < max; ++i)
    {
	(*get_mtrr) (i, &lbase, &lsize, &ltype);
	if (base >= lbase + lsize) continue;
	if ( (base < lbase) && (base + size <= lbase) ) continue;
	/*  At this point we know there is some kind of overlap/enclosure  */
	if ( (base < lbase) || (base + size > lbase + lsize) )
	{
	    up(&main_lock);
	    printk (KERN_WARNING "mtrr: 0x%lx000,0x%lx000 overlaps existing"
		    " 0x%lx000,0x%lx000\n",
		    base, size, lbase, lsize);
	    return -EINVAL;
	}
	/*  New region is enclosed by an existing region  */
	if (ltype != type)
	{
	    if (type == MTRR_TYPE_UNCACHABLE) continue;
	    up(&main_lock);
	    printk ( "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
		     base, size, attrib_to_str (ltype), attrib_to_str (type) );
	    return -EINVAL;
	}
	if (increment) ++usage_table[i];
	compute_ascii ();
	up(&main_lock);
	return i;
    }
    /*  Search for an empty MTRR  */
    i = (*get_free_region) (base, size);
    if (i < 0)
    {
	up(&main_lock);
	printk ("mtrr: no more MTRRs available\n");
	return i;
    }
    set_mtrr (i, base, size, type);
    usage_table[i] = 1;
    compute_ascii ();
    up(&main_lock);
    return i;
}   /*  End Function mtrr_add_page  */

/**
 *	mtrr_add - Add a memory type region
 *	@base: Physical base address of region
 *	@size: Physical size of region
 *	@type: Type of MTRR desired
 *	@increment: If this is true do usage counting on the region
 *
 *	Memory type region registers control the caching on newer Intel and
 *	non Intel processors. This function allows drivers to request that an
 *	MTRR be added. The details and hardware specifics of each processor's
 *	implementation are hidden from the caller, but nevertheless the
 *	caller should expect to need to provide a power of two size on an
 *	equivalent power of two boundary.
 *
 *	If the region cannot be added, either because all regions are in use
 *	or the CPU cannot support it, a negative value is returned. On success
 *	the register number for this entry is returned, but should be treated
 *	as a cookie only.
 *
 *	On a multiprocessor machine the changes are made to all processors.
 *	This is required on x86 by the Intel processors.
 *
 *	The available types are
 *
 *	%MTRR_TYPE_UNCACHABLE	-	No caching
 *
 *	%MTRR_TYPE_WRBACK	-	Write data back in bursts whenever
 *
 *	%MTRR_TYPE_WRCOMB	-	Write data back soon but allow bursts
 *
 *	%MTRR_TYPE_WRTHROUGH	-	Cache reads but not writes
 *
 *	BUGS: Needs a quiet flag for the cases where drivers do not mind
 *	failures and do not wish system log messages to be sent.
 */

int mtrr_add(unsigned long base, unsigned long size, unsigned int type, char increment)
{
/*  [SUMMARY] Add an MTRR entry.
    <base> The starting (base) address of the region.
    <size> The size (in bytes) of the region.
    <type> The type of the new region.
    <increment> If true and the region already exists, the usage count will be
    incremented.
    [RETURNS] The MTRR register on success, else a negative number indicating
    the error code.
*/

    if ( (base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)) )
    {
	printk ("mtrr: size and base must be multiples of 4 kiB\n");
	printk ("mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
	return -EINVAL;
    }
    return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type, increment);
}   /*  End Function mtrr_add  */
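
/* Typical driver usage, a sketch (the 0xe8000000 framebuffer base and
 * 4 MiB size are hypothetical; error handling elided):
 *
 *	int reg = mtrr_add (0xe8000000, 0x400000, MTRR_TYPE_WRCOMB, 1);
 *	if (reg < 0)
 *		...			// fall back to uncached access
 *	...
 *	mtrr_del (reg, 0xe8000000, 0x400000);
 */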
1468 
1469 /**
1470  *	mtrr_del_page - delete a memory type region
1471  *	@reg: Register returned by mtrr_add
1472  *	@base: Physical base address
1473  *	@size: Size of region
1474  *
1475  *	If register is supplied then base and size are ignored. This is
1476  *	how drivers should call it.
1477  *
1478  *	Releases an MTRR region. If the usage count drops to zero the
1479  *	register is freed and the region returns to default state.
1480  *	On success the register is returned, on failure a negative error
1481  *	code.
1482  */
1483 
mtrr_del_page(int reg,unsigned long base,unsigned long size)1484 int mtrr_del_page (int reg, unsigned long base, unsigned long size)
1485 /*  [SUMMARY] Delete MTRR/decrement usage count.
1486     <reg> The register. If this is less than 0 then <<base>> and <<size>> must
1487     be supplied.
1488     <base> The base address of the region. This is ignored if <<reg>> is >= 0.
1489     <size> The size of the region. This is ignored if <<reg>> is >= 0.
1490     [RETURNS] The register on success, else a negative number indicating
1491     the error code.
1492     [NOTE] This routine uses a spinlock.
1493 */
{
    int i, max;
    mtrr_type ltype;
    unsigned long lbase, lsize;

    if ( mtrr_if == MTRR_IF_NONE ) return -ENXIO;

    max = get_num_var_ranges ();
    down (&main_lock);
    if (reg < 0)
    {
	/*  Search for existing MTRR  */
	for (i = 0; i < max; ++i)
	{
	    (*get_mtrr) (i, &lbase, &lsize, &ltype);
	    if (lbase == base && lsize == size)
	    {
		reg = i;
		break;
	    }
	}
	if (reg < 0)
	{
	    up (&main_lock);
	    printk ("mtrr: no MTRR for %lx000,%lx000 found\n", base, size);
	    return -EINVAL;
	}
    }
    if (reg >= max)
    {
	up (&main_lock);
	printk ("mtrr: register: %d too big\n", reg);
	return -EINVAL;
    }
    if ( mtrr_if == MTRR_IF_CYRIX_ARR )
    {
	if ( (reg == 3) && arr3_protected )
	{
	    up (&main_lock);
	    printk ("mtrr: ARR3 cannot be changed\n");
	    return -EINVAL;
	}
    }
    (*get_mtrr) (reg, &lbase, &lsize, &ltype);
    if (lsize < 1)
    {
	up (&main_lock);
	printk ("mtrr: MTRR %d not used\n", reg);
	return -EINVAL;
    }
    if (usage_table[reg] < 1)
    {
	up (&main_lock);
	printk ("mtrr: reg: %d has count=0\n", reg);
	return -EINVAL;
    }
    if (--usage_table[reg] < 1) set_mtrr (reg, 0, 0, 0);
    compute_ascii ();
    up (&main_lock);
    return reg;
}   /*  End Function mtrr_del_page  */
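
/*  Call sketch (illustrative values): a register number from a previous
    mtrr_add_page() is the preferred handle; alternatively pass -1 and let
    the base/size pair (both in pages) select the register. Here reg,
    base_pfn and size_pfn are hypothetical:

	mtrr_del_page (reg, 0, 0);
	mtrr_del_page (-1, base_pfn, size_pfn);
*/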

/**
 *	mtrr_del - delete a memory type region
 *	@reg: Register returned by mtrr_add
 *	@base: Physical base address
 *	@size: Size of region
 *
 *	If register is supplied then base and size are ignored. This is
 *	how drivers should call it.
 *
 *	Releases an MTRR region. If the usage count drops to zero the
 *	register is freed and the region returns to default state.
 *	On success the register is returned, on failure a negative error
 *	code.
 */

int mtrr_del (int reg, unsigned long base, unsigned long size)
/*  [SUMMARY] Delete MTRR/decrement usage count.
    <reg> The register. If this is less than 0 then <<base>> and <<size>> must
    be supplied.
    <base> The base address of the region. This is ignored if <<reg>> is >= 0.
    <size> The size of the region. This is ignored if <<reg>> is >= 0.
    [RETURNS] The register on success, else a negative number indicating
    the error code.
*/
{
    if ( (base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)) )
    {
	printk ("mtrr: size and base must be multiples of 4 kiB\n");
	printk ("mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
	return -EINVAL;
    }
    return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}   /*  End Function mtrr_del  */

#ifdef USERSPACE_INTERFACE

static int mtrr_file_add (unsigned long base, unsigned long size,
			  unsigned int type, char increment, struct file *file, int page)
{
    int reg, max;
    unsigned int *fcount = file->private_data;

    max = get_num_var_ranges ();
    if (fcount == NULL)
    {
	if ( ( fcount = kmalloc (max * sizeof *fcount, GFP_KERNEL) ) == NULL )
	{
	    printk ("mtrr: could not allocate\n");
	    return -ENOMEM;
	}
	memset (fcount, 0, max * sizeof *fcount);
	file->private_data = fcount;
    }
    if (!page) {
	if ( (base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)) )
	{
	    printk ("mtrr: size and base must be multiples of 4 kiB\n");
	    printk ("mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
	    return -EINVAL;
	}
	base >>= PAGE_SHIFT;
	size >>= PAGE_SHIFT;
    }
    reg = mtrr_add_page (base, size, type, 1);
    if (reg >= 0) ++fcount[reg];
    return reg;
}   /*  End Function mtrr_file_add  */

static int mtrr_file_del (unsigned long base, unsigned long size,
			  struct file *file, int page)
{
    int reg;
    unsigned int *fcount = file->private_data;

    if (!page) {
	if ( (base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)) )
	{
	    printk ("mtrr: size and base must be multiples of 4 kiB\n");
	    printk ("mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
	    return -EINVAL;
	}
	base >>= PAGE_SHIFT;
	size >>= PAGE_SHIFT;
    }
    reg = mtrr_del_page (-1, base, size);
    if (reg < 0) return reg;
    if (fcount == NULL) return reg;
    if (fcount[reg] < 1) return -EINVAL;
    --fcount[reg];
    return reg;
}   /*  End Function mtrr_file_del  */

static ssize_t mtrr_read (struct file *file, char *buf, size_t len,
			  loff_t *ppos)
{
	loff_t pos = *ppos;
	if (pos < 0 || pos >= ascii_buf_bytes)
		return 0;
	if (len > ascii_buf_bytes - pos)
		len = ascii_buf_bytes - pos;
	if (copy_to_user(buf, ascii_buffer + pos, len))
		return -EFAULT;
	pos += len;
	*ppos = pos;

	return len;
}   /*  End Function mtrr_read  */

static ssize_t mtrr_write (struct file *file, const char *buf, size_t len,
			   loff_t *ppos)
/*  Format of control line:
    "base=%Lx size=%Lx type=%s"     OR:
    "disable=%d"
*/
{
    int i, err;
    unsigned long reg;
    unsigned long long base, size;
    char *ptr;
    char line[LINE_SIZE];

    if (!len) return -EINVAL;
    if ( !suser () ) return -EPERM;
    /*  Can't seek (pwrite) on this device  */
    if (ppos != &file->f_pos) return -ESPIPE;
    memset (line, 0, LINE_SIZE);
    if (len > LINE_SIZE) len = LINE_SIZE;
    if ( copy_from_user (line, buf, len - 1) ) return -EFAULT;
    ptr = line + strlen (line) - 1;
    if (*ptr == '\n') *ptr = '\0';
    if ( !strncmp (line, "disable=", 8) )
    {
	reg = simple_strtoul (line + 8, &ptr, 0);
	err = mtrr_del_page (reg, 0, 0);
	if (err < 0) return err;
	return len;
    }
    if ( strncmp (line, "base=", 5) )
    {
	printk ("mtrr: no \"base=\" in line: \"%s\"\n", line);
	return -EINVAL;
    }
    base = simple_strtoull (line + 5, &ptr, 0);
    for (; isspace (*ptr); ++ptr);
    if ( strncmp (ptr, "size=", 5) )
    {
	printk ("mtrr: no \"size=\" in line: \"%s\"\n", line);
	return -EINVAL;
    }
    size = simple_strtoull (ptr + 5, &ptr, 0);
    if ( (base & 0xfff) || (size & 0xfff) )
    {
	printk ("mtrr: size and base must be multiples of 4 kiB\n");
	printk ("mtrr: size: 0x%Lx  base: 0x%Lx\n", size, base);
	return -EINVAL;
    }
    for (; isspace (*ptr); ++ptr);
    if ( strncmp (ptr, "type=", 5) )
    {
	printk ("mtrr: no \"type=\" in line: \"%s\"\n", line);
	return -EINVAL;
    }
    ptr += 5;
    for (; isspace (*ptr); ++ptr);
    for (i = 0; i < MTRR_NUM_TYPES; ++i)
    {
	if ( strcmp (ptr, mtrr_strings[i]) ) continue;
	base >>= PAGE_SHIFT;
	size >>= PAGE_SHIFT;
	err = mtrr_add_page ((unsigned long)base, (unsigned long)size, i, 1);
	if (err < 0) return err;
	return len;
    }
    printk ("mtrr: illegal type: \"%s\"\n", ptr);
    return -EINVAL;
}   /*  End Function mtrr_write  */
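
/*  Userspace sketch (illustrative only; /proc/mtrr and the control-line
    format are as documented above, the address and size are assumptions):

	static const char cmd[] =
	    "base=0xf8000000 size=0x400000 type=write-combining\n";
	int fd = open ("/proc/mtrr", O_WRONLY);

	if (fd >= 0) {
	    write (fd, cmd, sizeof cmd - 1);
	    close (fd);
	}

    Writing "disable=2\n" instead would decrement the usage count on
    register 2, as handled at the top of mtrr_write() above.  */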

static int mtrr_ioctl (struct inode *inode, struct file *file,
		       unsigned int cmd, unsigned long arg)
{
    int err;
    mtrr_type type;
    struct mtrr_sentry sentry;
    struct mtrr_gentry gentry;

    switch (cmd)
    {
      default:
	return -ENOIOCTLCMD;
      case MTRRIOC_ADD_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_file_add (sentry.base, sentry.size, sentry.type, 1, file, 0);
	if (err < 0) return err;
	break;
      case MTRRIOC_SET_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_add (sentry.base, sentry.size, sentry.type, 0);
	if (err < 0) return err;
	break;
      case MTRRIOC_DEL_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_file_del (sentry.base, sentry.size, file, 0);
	if (err < 0) return err;
	break;
      case MTRRIOC_KILL_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_del (-1, sentry.base, sentry.size);
	if (err < 0) return err;
	break;
      case MTRRIOC_GET_ENTRY:
	if ( copy_from_user (&gentry, (void *) arg, sizeof gentry) )
	    return -EFAULT;
	if ( gentry.regnum >= get_num_var_ranges () ) return -EINVAL;
	(*get_mtrr) (gentry.regnum, &gentry.base, &gentry.size, &type);

	/* Hide entries that go above 4GB */
	if (gentry.base + gentry.size > 0x100000 || gentry.size == 0x100000)
	    gentry.base = gentry.size = gentry.type = 0;
	else {
	    gentry.base <<= PAGE_SHIFT;
	    gentry.size <<= PAGE_SHIFT;
	    gentry.type = type;
	}

	if ( copy_to_user ( (void *) arg, &gentry, sizeof gentry) )
	    return -EFAULT;
	break;
      case MTRRIOC_ADD_PAGE_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_file_add (sentry.base, sentry.size, sentry.type, 1, file, 1);
	if (err < 0) return err;
	break;
      case MTRRIOC_SET_PAGE_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_add_page (sentry.base, sentry.size, sentry.type, 0);
	if (err < 0) return err;
	break;
      case MTRRIOC_DEL_PAGE_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_file_del (sentry.base, sentry.size, file, 1);
	if (err < 0) return err;
	break;
      case MTRRIOC_KILL_PAGE_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_del_page (-1, sentry.base, sentry.size);
	if (err < 0) return err;
	break;
      case MTRRIOC_GET_PAGE_ENTRY:
	if ( copy_from_user (&gentry, (void *) arg, sizeof gentry) )
	    return -EFAULT;
	if ( gentry.regnum >= get_num_var_ranges () ) return -EINVAL;
	(*get_mtrr) (gentry.regnum, &gentry.base, &gentry.size, &type);
	gentry.type = type;

	if ( copy_to_user ( (void *) arg, &gentry, sizeof gentry) )
	    return -EFAULT;
	break;
    }
    return 0;
}   /*  End Function mtrr_ioctl  */
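
/*  ioctl sketch (illustrative only; the base and size values are
    assumptions):

	struct mtrr_sentry sentry;
	int fd = open ("/proc/mtrr", O_RDWR);

	sentry.base = 0xf8000000;
	sentry.size = 0x400000;
	sentry.type = MTRR_TYPE_WRCOMB;
	if (fd >= 0 && ioctl (fd, MTRRIOC_ADD_ENTRY, &sentry) < 0)
	    perror ("mtrr: MTRRIOC_ADD_ENTRY");

    Regions added this way are tracked per file descriptor, so mtrr_close()
    below releases anything a crashed process still holds.  */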

static int mtrr_close (struct inode *ino, struct file *file)
{
    int i, max;
    unsigned int *fcount = file->private_data;

    if (fcount == NULL) return 0;
    lock_kernel();
    max = get_num_var_ranges ();
    for (i = 0; i < max; ++i)
    {
	while (fcount[i] > 0)
	{
	    if (mtrr_del (i, 0, 0) < 0) printk ("mtrr: reg %d not used\n", i);
	    --fcount[i];
	}
    }
    unlock_kernel();
    kfree (fcount);
    file->private_data = NULL;
    return 0;
}   /*  End Function mtrr_close  */

static struct file_operations mtrr_fops =
{
    owner:	THIS_MODULE,
    read:	mtrr_read,
    write:	mtrr_write,
    ioctl:	mtrr_ioctl,
    release:	mtrr_close,
};

#  ifdef CONFIG_PROC_FS

static struct proc_dir_entry *proc_root_mtrr;

#  endif  /*  CONFIG_PROC_FS  */

static devfs_handle_t devfs_handle;

static void compute_ascii (void)
{
    char factor;
    int i, max;
    mtrr_type type;
    unsigned long base, size;

    ascii_buf_bytes = 0;
    max = get_num_var_ranges ();
    for (i = 0; i < max; i++)
    {
	(*get_mtrr) (i, &base, &size, &type);
	if (size == 0) usage_table[i] = 0;
	else
	{
	    if (size < (0x100000 >> PAGE_SHIFT))
	    {
		/* less than 1MB */
		factor = 'K';
		size <<= PAGE_SHIFT - 10;
	    }
	    else
	    {
		factor = 'M';
		size >>= 20 - PAGE_SHIFT;
	    }
	    sprintf
		(ascii_buffer + ascii_buf_bytes,
		 "reg%02i: base=0x%05lx000 (%4liMB), size=%4li%cB: %s, count=%d\n",
		 i, base, base >> (20 - PAGE_SHIFT), size, factor,
		 attrib_to_str (type), usage_table[i]);
	    ascii_buf_bytes += strlen (ascii_buffer + ascii_buf_bytes);
	}
    }
    devfs_set_file_size (devfs_handle, ascii_buf_bytes);
#  ifdef CONFIG_PROC_FS
    if (proc_root_mtrr)
	proc_root_mtrr->size = ascii_buf_bytes;
#  endif  /*  CONFIG_PROC_FS  */
}   /*  End Function compute_ascii  */
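
/*  The buffer built above yields /proc/mtrr lines of the form (values
    illustrative, for a 4 kiB page size):

	reg00: base=0x00000000 (   0MB), size= 128MB: write-back, count=1
	reg01: base=0xf8000000 (3968MB), size=   4MB: write-combining, count=1
*/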

#endif  /*  USERSPACE_INTERFACE  */

EXPORT_SYMBOL(mtrr_add);
EXPORT_SYMBOL(mtrr_del);

#ifdef CONFIG_SMP

typedef struct
{
    unsigned long base;
    unsigned long size;
    mtrr_type type;
} arr_state_t;

arr_state_t arr_state[8] __initdata =
{
    {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL},
    {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}
};

unsigned char ccr_state[7] __initdata = { 0, 0, 0, 0, 0, 0, 0 };

static void __init cyrix_arr_init_secondary(void)
{
    struct set_mtrr_context ctxt;
    int i;

    /* flush cache and enable MAPEN */
    set_mtrr_prepare_save (&ctxt);
    set_mtrr_disable (&ctxt);

    /* the CCRs are not contiguous */
    for(i=0; i<4; i++) setCx86(CX86_CCR0 + i, ccr_state[i]);
    for(   ; i<7; i++) setCx86(CX86_CCR4 + i, ccr_state[i]);
    for(i=0; i<8; i++)
      cyrix_set_arr_up(i,
        arr_state[i].base, arr_state[i].size, arr_state[i].type, FALSE);

    set_mtrr_done (&ctxt); /* flush cache and disable MAPEN */
}   /*  End Function cyrix_arr_init_secondary  */

#endif

/*
 * On Cyrix 6x86(MX) and M II the ARR3 is special: it is tied to SMM
 * (System Management Mode). So we need the following:
 * Check whether SMI_LOCK (CCR3 bit 0) is set
 *   if it is set, write a warning message: ARR3 cannot be changed!
 *     (it cannot be changed until the next processor reset)
 *   if it is reset, then we can change it, set all the needed bits:
 *   - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
 *   - disable access to SMM memory (CCR1 bit 2 reset)
 *   - disable SMM mode (CCR1 bit 1 reset)
 *   - disable write protection of ARR3 (CCR6 bit 1 reset)
 *   - (maybe) disable ARR3
 * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
 */
static void __init cyrix_arr_init(void)
{
    struct set_mtrr_context ctxt;
    unsigned char ccr[7];
    int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
#ifdef CONFIG_SMP
    int i;
#endif

    /* flush cache and enable MAPEN */
    set_mtrr_prepare_save (&ctxt);
    set_mtrr_disable (&ctxt);

    /* Save all CCRs locally */
    ccr[0] = getCx86 (CX86_CCR0);
    ccr[1] = getCx86 (CX86_CCR1);
    ccr[2] = getCx86 (CX86_CCR2);
    ccr[3] = ctxt.ccr3;
    ccr[4] = getCx86 (CX86_CCR4);
    ccr[5] = getCx86 (CX86_CCR5);
    ccr[6] = getCx86 (CX86_CCR6);

    if (ccr[3] & 1)
    {
	ccrc[3] = 1;
	arr3_protected = 1;
    }
    else
    {
	/* Disable SMM mode (bit 1), access to SMM memory (bit 2) and
	 * access to SMM memory through ARR3 (bit 7).
	 */
	if (ccr[1] & 0x80) { ccr[1] &= 0x7f; ccrc[1] |= 0x80; }
	if (ccr[1] & 0x04) { ccr[1] &= 0xfb; ccrc[1] |= 0x04; }
	if (ccr[1] & 0x02) { ccr[1] &= 0xfd; ccrc[1] |= 0x02; }
	arr3_protected = 0;
	if (ccr[6] & 0x02) {
	    ccr[6] &= 0xfd; ccrc[6] = 1; /* Disable write protection of ARR3 */
	    setCx86 (CX86_CCR6, ccr[6]);
	}
	/* Disable ARR3. This is safe now that we disabled SMM. */
	/* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */
    }
    /* If we changed CCR1 in memory, change it in the processor, too. */
    if (ccrc[1]) setCx86 (CX86_CCR1, ccr[1]);

    /* Enable ARR usage by the processor */
    if (!(ccr[5] & 0x20))
    {
	ccr[5] |= 0x20; ccrc[5] = 1;
	setCx86 (CX86_CCR5, ccr[5]);
    }

#ifdef CONFIG_SMP
    for(i=0; i<7; i++) ccr_state[i] = ccr[i];
    for(i=0; i<8; i++)
      cyrix_get_arr(i,
        &arr_state[i].base, &arr_state[i].size, &arr_state[i].type);
#endif

    set_mtrr_done (&ctxt); /* flush cache and disable MAPEN */

    if ( ccrc[5] ) printk ("mtrr: ARR usage was not enabled, enabled manually\n");
    if ( ccrc[3] ) printk ("mtrr: ARR3 cannot be changed\n");
/*
    if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n");
    if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n");
    if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n");
*/
    if ( ccrc[6] ) printk ("mtrr: ARR3 was write protected, unprotected\n");
}   /*  End Function cyrix_arr_init  */

/*
 *	Initialise the later (saner) Winchip MCR variant. In this version
 *	the BIOS can pass us the registers it has used (but not their values)
 *	and the control register is read/write.
 */

static void __init centaur_mcr1_init(void)
{
    unsigned i;
    u32 lo, hi;

    /* Unfortunately, MCR's are read-only, so there is no way to
     * find out what the bios might have done.
     */

    rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
    if(((lo>>17)&7)==1)		/* Type 1 Winchip2 MCR */
    {
    	lo &= ~0x1C0;		/* clear key */
    	lo |= 0x040;		/* set key to 1 */
	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);	/* unlock MCR */
    }

    centaur_mcr_type = 1;

    /*
     *	Clear any unconfigured MCR's.
     */

    for (i = 0; i < 8; ++i)
    {
    	if(centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0)
    	{
    		if(!(lo & (1<<(9+i))))
			wrmsr (MSR_IDT_MCR0 + i, 0, 0);
		else
			/*
			 *	If the BIOS set up an MCR we cannot see it
			 *	but we don't wish to obliterate it
			 */
			centaur_mcr_reserved |= (1<<i);
	}
    }
    /*
     *	Throw the main write-combining switch...
     *	However if OOSTORE is enabled then people have already done far
     *	cleverer things and we should behave.
     */

    lo |= 15;			/* Write combine enables */
    wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
}   /*  End Function centaur_mcr1_init  */

/*
 *	Initialise the original WinChip, which has read-only MCR registers,
 *	no used-register bitmask for the BIOS to pass on, and a write-only
 *	control register.
 */

static void __init centaur_mcr0_init(void)
{
    unsigned i;

    /* Unfortunately, MCR's are read-only, so there is no way to
     * find out what the bios might have done.
     */

    /* Clear any unconfigured MCR's.
     * This way we are sure that the centaur_mcr array contains the actual
     * values. The disadvantage is that any BIOS tweaks are thus undone.
     */
    for (i = 0; i < 8; ++i)
    {
    	if(centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0)
		wrmsr (MSR_IDT_MCR0 + i, 0, 0);
    }

    wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);	/* Write only */
}   /*  End Function centaur_mcr0_init  */

/*
 *	Initialise Winchip series MCR registers
 */

static void __init centaur_mcr_init(void)
{
    struct set_mtrr_context ctxt;

    set_mtrr_prepare_save (&ctxt);
    set_mtrr_disable (&ctxt);

    if(boot_cpu_data.x86_model==4)
    	centaur_mcr0_init();
    else if(boot_cpu_data.x86_model==8 || boot_cpu_data.x86_model == 9)
    	centaur_mcr1_init();

    set_mtrr_done (&ctxt);
}   /*  End Function centaur_mcr_init  */

static int __init mtrr_setup(void)
{
    if ( test_bit(X86_FEATURE_MTRR, &boot_cpu_data.x86_capability) ) {
	/* Intel (P6) standard MTRRs */
	mtrr_if = MTRR_IF_INTEL;
	get_mtrr = intel_get_mtrr;
	set_mtrr_up = intel_set_mtrr_up;
	switch (boot_cpu_data.x86_vendor) {

	case X86_VENDOR_AMD:
		/* The original Athlon docs said that
		   total addressable memory is 44 bits wide.
		   It was not really clear whether its MTRRs
		   follow this or not. (Read: 44 or 36 bits).
		   However, "x86-64_overview.pdf" explicitly
		   states that "previous implementations support
		   36 bit MTRRs" and also provides a way to
		   query the width (in bits) of the physical
		   addressable memory on the Hammer family.
		 */
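		/* Worked example (assumed width): a reported phys_addr of 40
		   with PAGE_SHIFT == 12 gives
		   size_or_mask  = ~((1 << 28) - 1) == 0xf0000000 and
		   size_and_mask = ~0xf0000000 & 0xfff00000 == 0x0ff00000.  */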
		if (boot_cpu_data.x86 == 15 && (cpuid_eax(0x80000000) >= 0x80000008)) {
			u32	phys_addr;
			phys_addr = cpuid_eax(0x80000008) & 0xff;
			size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
			size_and_mask = ~size_or_mask & 0xfff00000;
			break;
		}
		size_or_mask  = 0xff000000; /* 36 bits */
		size_and_mask = 0x00f00000;
		break;

	case X86_VENDOR_CENTAUR:
		/* The VIA Cyrix family has Intel-style MTRRs, but doesn't support PAE */
		if (boot_cpu_data.x86 == 6) {
			size_or_mask  = 0xfff00000; /* 32 bits */
			size_and_mask = 0;
		}
		break;

	default:
		/* Intel, etc. */
		size_or_mask  = 0xff000000; /* 36 bits */
		size_and_mask = 0x00f00000;
		break;
	}

    } else if ( test_bit(X86_FEATURE_K6_MTRR, &boot_cpu_data.x86_capability) ) {
	/* Pre-Athlon (K6) AMD CPU MTRRs */
	mtrr_if = MTRR_IF_AMD_K6;
	get_mtrr = amd_get_mtrr;
	set_mtrr_up = amd_set_mtrr_up;
	size_or_mask  = 0xfff00000; /* 32 bits */
	size_and_mask = 0;
    } else if ( test_bit(X86_FEATURE_CYRIX_ARR, &boot_cpu_data.x86_capability) ) {
	/* Cyrix ARRs */
	mtrr_if = MTRR_IF_CYRIX_ARR;
	get_mtrr = cyrix_get_arr;
	set_mtrr_up = cyrix_set_arr_up;
	get_free_region = cyrix_get_free_region;
	cyrix_arr_init();
	size_or_mask  = 0xfff00000; /* 32 bits */
	size_and_mask = 0;
    } else if ( test_bit(X86_FEATURE_CENTAUR_MCR, &boot_cpu_data.x86_capability) ) {
	/* Centaur MCRs */
	mtrr_if = MTRR_IF_CENTAUR_MCR;
	get_mtrr = centaur_get_mcr;
	set_mtrr_up = centaur_set_mcr_up;
	get_free_region = centaur_get_free_region;
	centaur_mcr_init();
	size_or_mask  = 0xfff00000; /* 32 bits */
	size_and_mask = 0;
    } else {
	/* No supported MTRR interface */
	mtrr_if = MTRR_IF_NONE;
    }

    printk ("mtrr: v%s Richard Gooch (rgooch@atnf.csiro.au)\n"
	    "mtrr: detected mtrr type: %s\n",
	    MTRR_VERSION, mtrr_if_name[mtrr_if]);

    return (mtrr_if != MTRR_IF_NONE);
}   /*  End Function mtrr_setup  */

#ifdef CONFIG_SMP

static volatile unsigned long smp_changes_mask __initdata = 0;
static struct mtrr_state smp_mtrr_state __initdata = {0, 0};

void __init mtrr_init_boot_cpu(void)
{
    if ( !mtrr_setup () )
	return;

    if ( mtrr_if == MTRR_IF_INTEL ) {
	/* Only for Intel MTRRs */
	get_mtrr_state (&smp_mtrr_state);
    }
}   /*  End Function mtrr_init_boot_cpu  */

static void __init intel_mtrr_init_secondary_cpu(void)
{
    unsigned long mask, count;
    struct set_mtrr_context ctxt;

    /*  Note that this is not ideal, since the cache is only flushed/disabled
	for this CPU while the MTRRs are changed, but changing this requires
	more invasive changes to the way the kernel boots  */
    set_mtrr_prepare_save (&ctxt);
    set_mtrr_disable (&ctxt);
    mask = set_mtrr_state (&smp_mtrr_state, &ctxt);
    set_mtrr_done (&ctxt);
    /*  Use the atomic bitops to update the global mask  */
    for (count = 0; count < sizeof mask * 8; ++count)
    {
	if (mask & 0x01) set_bit (count, &smp_changes_mask);
	mask >>= 1;
    }
}   /*  End Function intel_mtrr_init_secondary_cpu  */

void __init mtrr_init_secondary_cpu(void)
{
    switch ( mtrr_if ) {
    case MTRR_IF_INTEL:
	/* Intel (P6) standard MTRRs */
	intel_mtrr_init_secondary_cpu();
	break;
    case MTRR_IF_CYRIX_ARR:
	/* This is _completely theoretical_!
	 * I assume here that one day Cyrix will support Intel APIC.
	 * In reality on non-Intel CPUs we won't even get to this routine.
	 * Hopefully no one will plug two Cyrix processors in a dual P5 board.
	 *  :-)
	 */
	cyrix_arr_init_secondary ();
	break;
    case MTRR_IF_NONE:
	break;
    default:
	/* I see no MTRRs I can support in SMP mode... */
	printk ("mtrr: SMP support incomplete for this vendor\n");
    }
}   /*  End Function mtrr_init_secondary_cpu  */
#endif  /*  CONFIG_SMP  */

int __init mtrr_init(void)
{
#ifdef CONFIG_SMP
    /* mtrr_setup() should already have been called from mtrr_init_boot_cpu() */

    if ( mtrr_if == MTRR_IF_INTEL ) {
	finalize_mtrr_state (&smp_mtrr_state);
	mtrr_state_warn (smp_changes_mask);
    }
#else
    if ( !mtrr_setup() )
	return 0;		/* MTRRs not supported? */
#endif

#ifdef CONFIG_PROC_FS
    proc_root_mtrr = create_proc_entry ("mtrr", S_IWUSR | S_IRUGO, &proc_root);
    if (proc_root_mtrr) {
	proc_root_mtrr->owner = THIS_MODULE;
	proc_root_mtrr->proc_fops = &mtrr_fops;
    }
#endif
#ifdef USERSPACE_INTERFACE
    devfs_handle = devfs_register (NULL, "cpu/mtrr", DEVFS_FL_DEFAULT, 0, 0,
				   S_IFREG | S_IRUGO | S_IWUSR,
				   &mtrr_fops, NULL);
#endif
    init_table ();
    return 0;
}   /*  End Function mtrr_init  */

/*
 * Local Variables:
 * mode:c
 * c-file-style:"k&r"
 * c-basic-offset:4
 * End:
 */