/*
 * cpufreq driver for Enhanced SpeedStep, as found in Intel's Pentium
 * M (part of the Centrino chipset).
 *
 * Since the original Pentium M, most new Intel CPUs support Enhanced
 * SpeedStep.
 *
 * Despite the "SpeedStep" in the name, this is almost entirely unlike
 * traditional SpeedStep.
 *
 * Modelled on speedstep.c
 *
 * Copyright (C) 2003 Jeremy Fitzhardinge <jeremy@goop.org>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/sched.h>	/* current */
#include <linux/delay.h>
#include <linux/compiler.h>
#include <linux/gfp.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>

#define PFX		"speedstep-centrino: "
#define MAINTAINER	"cpufreq@vger.kernel.org"

#define INTEL_MSR_RANGE	(0xffff)
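/*
 * The low 16 bits of IA32_PERF_CTL/IA32_PERF_STATUS carry the bus-ratio/VID
 * pair this driver programs; note that the code below masks with a 0xffff
 * literal directly rather than with INTEL_MSR_RANGE.
 */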

struct cpu_id
{
	__u8	x86;		/* CPU family */
	__u8	x86_model;	/* model */
	__u8	x86_mask;	/* stepping */
};

enum {
	CPU_BANIAS,
	CPU_DOTHAN_A1,
	CPU_DOTHAN_A2,
	CPU_DOTHAN_B0,
	CPU_MP4HT_D0,
	CPU_MP4HT_E0,
};

static const struct cpu_id cpu_ids[] = {
	[CPU_BANIAS]	= { 6,  9, 5 },
	[CPU_DOTHAN_A1]	= { 6, 13, 1 },
	[CPU_DOTHAN_A2]	= { 6, 13, 2 },
	[CPU_DOTHAN_B0]	= { 6, 13, 6 },
	[CPU_MP4HT_D0]	= {15,  3, 4 },
	[CPU_MP4HT_E0]	= {15,  4, 1 },
};
#define N_IDS	ARRAY_SIZE(cpu_ids)

struct cpu_model
{
	const struct cpu_id *cpu_id;
	const char	*model_name;
	unsigned	max_freq; /* max clock in kHz */

	struct cpufreq_frequency_table *op_points; /* clock/voltage pairs */
};
static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
				  const struct cpu_id *x);

/* Operating points for current CPU */
static DEFINE_PER_CPU(struct cpu_model *, centrino_model);
static DEFINE_PER_CPU(const struct cpu_id *, centrino_cpu);

static struct cpufreq_driver centrino_driver;

#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE

/* Computes the correct form for IA32_PERF_CTL MSR for a particular
   frequency/voltage operating point; frequency in MHz, volts in mV.
   This is stored as "index" in the structure. */
#define OP(mhz, mv)							\
	{								\
		.frequency = (mhz) * 1000,				\
		.index = (((mhz)/100) << 8) | ((mv - 700) / 16)		\
	}
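/*
 * For illustration of the encoding above (not an extra table entry):
 * OP(600, 844) yields .frequency = 600000 kHz and
 * .index = (6 << 8) | ((844 - 700) / 16) = 0x0609, i.e. the bus ratio in
 * bits 15:8 and the voltage ID in bits 7:0 of IA32_PERF_CTL.
 */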

/*
 * These voltage tables were derived from the Intel Pentium M
 * datasheet, document 25261202.pdf, Table 5. I have verified they
 * are consistent with my IBM ThinkPad X31, which has a 1.3GHz Pentium
 * M.
 */

/* Ultra Low Voltage Intel Pentium M processor 900MHz (Banias) */
static struct cpufreq_frequency_table banias_900[] =
{
	OP(600,  844),
	OP(800,  988),
	OP(900, 1004),
	{ .frequency = CPUFREQ_TABLE_END }
};

/* Ultra Low Voltage Intel Pentium M processor 1000MHz (Banias) */
static struct cpufreq_frequency_table banias_1000[] =
{
	OP(600,   844),
	OP(800,   972),
	OP(900,   988),
	OP(1000, 1004),
	{ .frequency = CPUFREQ_TABLE_END }
};

/* Low Voltage Intel Pentium M processor 1.10GHz (Banias) */
static struct cpufreq_frequency_table banias_1100[] =
{
	OP( 600,  956),
	OP( 800, 1020),
	OP( 900, 1100),
	OP(1000, 1164),
	OP(1100, 1180),
	{ .frequency = CPUFREQ_TABLE_END }
};


/* Low Voltage Intel Pentium M processor 1.20GHz (Banias) */
static struct cpufreq_frequency_table banias_1200[] =
{
	OP( 600,  956),
	OP( 800, 1004),
	OP( 900, 1020),
	OP(1000, 1100),
	OP(1100, 1164),
	OP(1200, 1180),
	{ .frequency = CPUFREQ_TABLE_END }
};

/* Intel Pentium M processor 1.30GHz (Banias) */
static struct cpufreq_frequency_table banias_1300[] =
{
	OP( 600,  956),
	OP( 800, 1260),
	OP(1000, 1292),
	OP(1200, 1356),
	OP(1300, 1388),
	{ .frequency = CPUFREQ_TABLE_END }
};

/* Intel Pentium M processor 1.40GHz (Banias) */
static struct cpufreq_frequency_table banias_1400[] =
{
	OP( 600,  956),
	OP( 800, 1180),
	OP(1000, 1308),
	OP(1200, 1436),
	OP(1400, 1484),
	{ .frequency = CPUFREQ_TABLE_END }
};

/* Intel Pentium M processor 1.50GHz (Banias) */
static struct cpufreq_frequency_table banias_1500[] =
{
	OP( 600,  956),
	OP( 800, 1116),
	OP(1000, 1228),
	OP(1200, 1356),
	OP(1400, 1452),
	OP(1500, 1484),
	{ .frequency = CPUFREQ_TABLE_END }
};

/* Intel Pentium M processor 1.60GHz (Banias) */
static struct cpufreq_frequency_table banias_1600[] =
{
	OP( 600,  956),
	OP( 800, 1036),
	OP(1000, 1164),
	OP(1200, 1276),
	OP(1400, 1420),
	OP(1600, 1484),
	{ .frequency = CPUFREQ_TABLE_END }
};

/* Intel Pentium M processor 1.70GHz (Banias) */
static struct cpufreq_frequency_table banias_1700[] =
{
	OP( 600,  956),
	OP( 800, 1004),
	OP(1000, 1116),
	OP(1200, 1228),
	OP(1400, 1308),
	OP(1700, 1484),
	{ .frequency = CPUFREQ_TABLE_END }
};
#undef OP

#define _BANIAS(cpuid, max, name)	\
{	.cpu_id		= cpuid,	\
	.model_name	= "Intel(R) Pentium(R) M processor " name "MHz", \
	.max_freq	= (max)*1000,	\
	.op_points	= banias_##max,	\
}
#define BANIAS(max)	_BANIAS(&cpu_ids[CPU_BANIAS], max, #max)
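/*
 * A sketch of how the helper expands (not an additional entry):
 * BANIAS(1300) becomes { .cpu_id = &cpu_ids[CPU_BANIAS],
 * .model_name = "Intel(R) Pentium(R) M processor 1300MHz",
 * .max_freq = 1300000, .op_points = banias_1300 }.
 */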

/* CPU models, their operating frequency range, and freq/voltage
   operating points */
static struct cpu_model models[] =
{
	_BANIAS(&cpu_ids[CPU_BANIAS], 900, " 900"),
	BANIAS(1000),
	BANIAS(1100),
	BANIAS(1200),
	BANIAS(1300),
	BANIAS(1400),
	BANIAS(1500),
	BANIAS(1600),
	BANIAS(1700),

	/* NULL model_name is a wildcard */
	{ &cpu_ids[CPU_DOTHAN_A1], NULL, 0, NULL },
	{ &cpu_ids[CPU_DOTHAN_A2], NULL, 0, NULL },
	{ &cpu_ids[CPU_DOTHAN_B0], NULL, 0, NULL },
	{ &cpu_ids[CPU_MP4HT_D0], NULL, 0, NULL },
	{ &cpu_ids[CPU_MP4HT_E0], NULL, 0, NULL },

	{ NULL, }
};
#undef _BANIAS
#undef BANIAS

static int centrino_cpu_init_table(struct cpufreq_policy *policy)
{
	struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
	struct cpu_model *model;

	for (model = models; model->cpu_id != NULL; model++)
		if (centrino_verify_cpu_id(cpu, model->cpu_id) &&
		    (model->model_name == NULL ||
		     strcmp(cpu->x86_model_id, model->model_name) == 0))
			break;

	if (model->cpu_id == NULL) {
		/* No match at all */
		pr_debug("no support for CPU model \"%s\": "
			 "send /proc/cpuinfo to " MAINTAINER "\n",
			 cpu->x86_model_id);
		return -ENOENT;
	}

	if (model->op_points == NULL) {
		/* Matched a non-match */
		pr_debug("no table support for CPU model \"%s\"\n",
			 cpu->x86_model_id);
		pr_debug("try using the acpi-cpufreq driver\n");
		return -ENOENT;
	}

	per_cpu(centrino_model, policy->cpu) = model;

	pr_debug("found \"%s\": max frequency: %dkHz\n",
		 model->model_name, model->max_freq);

	return 0;
}


#else
static inline int centrino_cpu_init_table(struct cpufreq_policy *policy)
{
	return -ENODEV;
}
#endif /* CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE */

static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
				  const struct cpu_id *x)
{
	if ((c->x86 == x->x86) &&
	    (c->x86_model == x->x86_model) &&
	    (c->x86_mask == x->x86_mask))
		return 1;
	return 0;
}

/* To be called only after centrino_model is initialized */
static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe)
{
	int i;

	/*
	 * Extract clock in kHz from PERF_CTL value
	 * for centrino, as some DSDTs are buggy.
	 * Ideally, this can be done using the acpi_data structure.
	 */
	if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||
	    (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) ||
	    (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) {
		msr = (msr >> 8) & 0xff;
		return msr * 100000;
	}
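	/*
	 * For the models above, bits 15:8 of the MSR hold the bus ratio;
	 * multiplying by 100000 assumes their 100 MHz FSB base clock, so a
	 * ratio of e.g. 13 decodes to 1300000 kHz.
	 */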

	if ((!per_cpu(centrino_model, cpu)) ||
	    (!per_cpu(centrino_model, cpu)->op_points))
		return 0;

	msr &= 0xffff;
	for (i = 0;
	     per_cpu(centrino_model, cpu)->op_points[i].frequency
						!= CPUFREQ_TABLE_END;
	     i++) {
		if (msr == per_cpu(centrino_model, cpu)->op_points[i].index)
			return per_cpu(centrino_model, cpu)->
						op_points[i].frequency;
	}
	if (failsafe)
		return per_cpu(centrino_model, cpu)->op_points[i-1].frequency;
	else
		return 0;
}

/* Return the current CPU frequency in kHz */
static unsigned int get_cur_freq(unsigned int cpu)
{
	unsigned l, h;
	unsigned clock_freq;

	rdmsr_on_cpu(cpu, MSR_IA32_PERF_STATUS, &l, &h);
	clock_freq = extract_clock(l, cpu, 0);

	if (unlikely(clock_freq == 0)) {
		/*
		 * On some CPUs, we can see transient MSR values (which are
		 * not present in _PSS) while the CPU is doing an automatic
		 * P-state transition (like TM2). Get the last freq set
		 * in PERF_CTL.
		 */
		rdmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, &l, &h);
		clock_freq = extract_clock(l, cpu, 1);
	}
	return clock_freq;
}


static int centrino_cpu_init(struct cpufreq_policy *policy)
{
	struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
	unsigned freq;
	unsigned l, h;
	int ret;
	int i;

	/* Only Intel makes Enhanced Speedstep-capable CPUs */
	if (cpu->x86_vendor != X86_VENDOR_INTEL ||
	    !cpu_has(cpu, X86_FEATURE_EST))
		return -ENODEV;

	if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
		centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
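	/*
	 * With a constant TSC the delay-loop calibration does not scale with
	 * the CPU clock, so CPUFREQ_CONST_LOOPS tells the cpufreq core it
	 * need not adjust loops_per_jiffy across frequency transitions.
	 */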

	if (policy->cpu != 0)
		return -ENODEV;

	for (i = 0; i < N_IDS; i++)
		if (centrino_verify_cpu_id(cpu, &cpu_ids[i]))
			break;

	if (i != N_IDS)
		per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i];

	if (!per_cpu(centrino_cpu, policy->cpu)) {
		pr_debug("found unsupported CPU with "
			 "Enhanced SpeedStep: send /proc/cpuinfo to "
			 MAINTAINER "\n");
		return -ENODEV;
	}

	if (centrino_cpu_init_table(policy))
		return -ENODEV;

	/* Check to see if Enhanced SpeedStep is enabled, and try to
	   enable it if not. */
	rdmsr(MSR_IA32_MISC_ENABLE, l, h);

	if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
		l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
		pr_debug("trying to enable Enhanced SpeedStep (%x)\n", l);
		wrmsr(MSR_IA32_MISC_ENABLE, l, h);

		/* check to see if it stuck */
		rdmsr(MSR_IA32_MISC_ENABLE, l, h);
		if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
			printk(KERN_INFO PFX
				"couldn't enable Enhanced SpeedStep\n");
			return -ENODEV;
		}
	}

	freq = get_cur_freq(policy->cpu);
	policy->cpuinfo.transition_latency = 10000;
						/* 10uS transition latency */
	policy->cur = freq;

	pr_debug("centrino_cpu_init: cur=%dkHz\n", policy->cur);

	ret = cpufreq_frequency_table_cpuinfo(policy,
		per_cpu(centrino_model, policy->cpu)->op_points);
	if (ret)
		return ret;

	cpufreq_frequency_table_get_attr(
		per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu);

	return 0;
}

static int centrino_cpu_exit(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;

	if (!per_cpu(centrino_model, cpu))
		return -ENODEV;

	cpufreq_frequency_table_put_attr(cpu);

	per_cpu(centrino_model, cpu) = NULL;

	return 0;
}

/**
 * centrino_verify - verifies a new CPUFreq policy
 * @policy: new policy
 *
 * Limits must be within this model's frequency range, with at least
 * one border included.
 */
static int centrino_verify(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy,
			per_cpu(centrino_model, policy->cpu)->op_points);
}

/**
 * centrino_target - set a new CPUFreq policy
 * @policy: new policy
 * @target_freq: the target frequency
 * @relation: how that frequency relates to achieved frequency
 *	(CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
 *
 * Sets a new CPUFreq policy.
 */
static int centrino_target(struct cpufreq_policy *policy,
			   unsigned int target_freq,
			   unsigned int relation)
{
	unsigned int newstate = 0;
	unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
	struct cpufreq_freqs freqs;
	int retval = 0;
	unsigned int j, k, first_cpu, tmp;
	cpumask_var_t covered_cpus;

	if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)))
		return -ENOMEM;
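
	/*
	 * covered_cpus records which CPUs have already had PERF_CTL written
	 * in the loop below, so a partial failure can be rolled back in the
	 * best-effort undo path at the end of this function.
	 */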

	if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
		retval = -ENODEV;
		goto out;
	}

	if (unlikely(cpufreq_frequency_table_target(policy,
			per_cpu(centrino_model, cpu)->op_points,
			target_freq,
			relation,
			&newstate))) {
		retval = -EINVAL;
		goto out;
	}

	first_cpu = 1;
	for_each_cpu(j, policy->cpus) {
		int good_cpu;

		/* cpufreq holds the hotplug lock, so we are safe here */
		if (!cpu_online(j))
			continue;

		/*
		 * Support for SMP systems.
		 * Make sure we are running on the CPU that wants to change
		 * the frequency.
		 */
		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
			good_cpu = cpumask_any_and(policy->cpus,
						   cpu_online_mask);
		else
			good_cpu = j;

		if (good_cpu >= nr_cpu_ids) {
			pr_debug("couldn't limit to CPUs in this domain\n");
			retval = -EAGAIN;
			if (first_cpu) {
				/* We haven't started the transition yet. */
				goto out;
			}
			break;
		}

		msr = per_cpu(centrino_model, cpu)->op_points[newstate].index;

		if (first_cpu) {
			rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h);
			if (msr == (oldmsr & 0xffff)) {
				pr_debug("no change needed - msr was and needs "
					 "to be %x\n", oldmsr);
				retval = 0;
				goto out;
			}

			freqs.old = extract_clock(oldmsr, cpu, 0);
			freqs.new = extract_clock(msr, cpu, 0);

			pr_debug("target=%dkHz old=%d new=%d msr=%04x\n",
				 target_freq, freqs.old, freqs.new, msr);

			for_each_cpu(k, policy->cpus) {
				if (!cpu_online(k))
					continue;
				freqs.cpu = k;
				cpufreq_notify_transition(&freqs,
					CPUFREQ_PRECHANGE);
			}

			first_cpu = 0;
			/* all but 16 LSB are reserved, treat them with care */
			oldmsr &= ~0xffff;
			msr &= 0xffff;
			oldmsr |= msr;
		}

		wrmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, oldmsr, h);
		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
			break;

		cpumask_set_cpu(j, covered_cpus);
	}

	for_each_cpu(k, policy->cpus) {
		if (!cpu_online(k))
			continue;
		freqs.cpu = k;
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}

	if (unlikely(retval)) {
		/*
		 * We have failed halfway through the frequency change.
		 * We have sent callbacks to policy->cpus and
		 * MSRs have already been written on covered_cpus.
		 * Best effort undo...
		 */

		for_each_cpu(j, covered_cpus)
			wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h);

		tmp = freqs.new;
		freqs.new = freqs.old;
		freqs.old = tmp;
		for_each_cpu(j, policy->cpus) {
			if (!cpu_online(j))
				continue;
			cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
			cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
		}
	}
	retval = 0;

out:
	free_cpumask_var(covered_cpus);
	return retval;
}

static struct freq_attr *centrino_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

static struct cpufreq_driver centrino_driver = {
	.name		= "centrino", /* should be speedstep-centrino,
					 but there's a 16 char limit */
	.init		= centrino_cpu_init,
	.exit		= centrino_cpu_exit,
	.verify		= centrino_verify,
	.target		= centrino_target,
	.get		= get_cur_freq,
	.attr		= centrino_attr,
	.owner		= THIS_MODULE,
};

/*
 * This doesn't replace the detailed checks above because
 * the generic CPU IDs don't have a way to match for steppings
 * or ASCII model IDs.
 */
static const struct x86_cpu_id centrino_ids[] = {
	{ X86_VENDOR_INTEL, 6, 9, X86_FEATURE_EST },
	{ X86_VENDOR_INTEL, 6, 13, X86_FEATURE_EST },
	{ X86_VENDOR_INTEL, 6, 13, X86_FEATURE_EST },
	{ X86_VENDOR_INTEL, 6, 13, X86_FEATURE_EST },
	{ X86_VENDOR_INTEL, 15, 3, X86_FEATURE_EST },
	{ X86_VENDOR_INTEL, 15, 4, X86_FEATURE_EST },
	{}
};
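/*
 * Presumably the three identical family 6, model 13 entries mirror the three
 * Dothan steppings in cpu_ids[]; since x86_cpu_id cannot match on stepping
 * they are redundant but harmless.
 */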
#if 0
/* Autoload or not? Do not for now. */
MODULE_DEVICE_TABLE(x86cpu, centrino_ids);
#endif

/**
 * centrino_init - initializes the Enhanced SpeedStep CPUFreq driver
 *
 * Initializes the Enhanced SpeedStep support. Returns -ENODEV on
 * unsupported devices, -ENOENT if there's no voltage table for this
 * particular CPU model, -EINVAL on problems during initialization,
 * and zero on success.
 *
 * This is quite picky. Not only does the CPU have to advertise the
 * "est" flag in the cpuid capability flags, we look for a specific
 * CPU model and stepping, and we need to have the exact model name in
 * our voltage tables. That is, be paranoid about not releasing
 * someone's valuable magic smoke.
 */
static int __init centrino_init(void)
{
	if (!x86_match_cpu(centrino_ids))
		return -ENODEV;
	return cpufreq_register_driver(&centrino_driver);
}

static void __exit centrino_exit(void)
{
	cpufreq_unregister_driver(&centrino_driver);
}

MODULE_AUTHOR ("Jeremy Fitzhardinge <jeremy@goop.org>");
MODULE_DESCRIPTION ("Enhanced SpeedStep driver for Intel Pentium M processors.");
MODULE_LICENSE ("GPL");

late_initcall(centrino_init);
module_exit(centrino_exit);