1 /*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
8 * Added handling for CPU hotplug
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/init.h>
21 #include <linux/notifier.h>
22 #include <linux/cpufreq.h>
23 #include <linux/delay.h>
24 #include <linux/interrupt.h>
25 #include <linux/spinlock.h>
26 #include <linux/device.h>
27 #include <linux/slab.h>
28 #include <linux/cpu.h>
29 #include <linux/completion.h>
30 #include <linux/mutex.h>
31 #include <linux/syscore_ops.h>
32
33 #include <trace/events/power.h>
34
35 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
36 "cpufreq-core", msg)
37
38 /**
39 * The "cpufreq driver" - the arch- or hardware-dependent low
40 * level driver of CPUFreq support, and its spinlock. This lock
41 * also protects the cpufreq_cpu_data array.
42 */
43 static struct cpufreq_driver *cpufreq_driver;
44 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
45 #ifdef CONFIG_HOTPLUG_CPU
46 /* This one keeps track of the previously set governor of a removed CPU */
47 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
48 #endif
49 static DEFINE_SPINLOCK(cpufreq_driver_lock);
50
51 /*
52 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
53 * all cpufreq/hotplug/workqueue/etc related lock issues.
54 *
55 * The rules for this semaphore:
56 * - Any routine that wants to read from the policy structure will
57 * do a down_read on this semaphore.
58 * - Any routine that will write to the policy structure and/or may take away
59 * the policy altogether (eg. CPU hotplug), will hold this lock in write
60 * mode before doing so.
61 *
62 * Additional rules:
 * - All holders of the lock should check to make sure that the CPU they
 *   are concerned with is online after they get the lock.
65 * - Governor routines that can be called in cpufreq hotplug path should not
66 * take this sem as top level hotplug notifier handler takes this.
67 * - Lock should not be held across
68 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
69 */
70 static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
71 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
72
73 #define lock_policy_rwsem(mode, cpu) \
74 static int lock_policy_rwsem_##mode \
75 (int cpu) \
76 { \
77 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
78 BUG_ON(policy_cpu == -1); \
79 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
80 if (unlikely(!cpu_online(cpu))) { \
81 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
82 return -1; \
83 } \
84 \
85 return 0; \
86 }
87
88 lock_policy_rwsem(read, cpu);
89
90 lock_policy_rwsem(write, cpu);
91
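/*
 * Illustrative usage of the helpers generated above (a sketch, not a call
 * site in this file): readers bracket policy accesses with
 *
 *	if (lock_policy_rwsem_read(cpu) < 0)
 *		return -EINVAL;		(CPU went offline meanwhile)
 *	...read policy fields...
 *	unlock_policy_rwsem_read(cpu);
 *
 * and writers use the _write variants in the same way.
 */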
static void unlock_policy_rwsem_read(int cpu)
93 {
94 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
95 BUG_ON(policy_cpu == -1);
96 up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
97 }
98
static void unlock_policy_rwsem_write(int cpu)
100 {
101 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
102 BUG_ON(policy_cpu == -1);
103 up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
104 }
105
106
107 /* internal prototypes */
108 static int __cpufreq_governor(struct cpufreq_policy *policy,
109 unsigned int event);
110 static unsigned int __cpufreq_get(unsigned int cpu);
111 static void handle_update(struct work_struct *work);
112
113 /**
114 * Two notifier lists: the "policy" list is involved in the
115 * validation process for a new CPU frequency policy; the
116 * "transition" list for kernel code that needs to handle
117 * changes to devices when the CPU clock speed changes.
118 * The mutex locks both lists.
119 */
120 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
121 static struct srcu_notifier_head cpufreq_transition_notifier_list;
122
123 static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
125 {
126 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
127 init_cpufreq_transition_notifier_list_called = true;
128 return 0;
129 }
130 pure_initcall(init_cpufreq_transition_notifier_list);
131
132 static LIST_HEAD(cpufreq_governor_list);
133 static DEFINE_MUTEX(cpufreq_governor_mutex);
134
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
136 {
137 struct cpufreq_policy *data;
138 unsigned long flags;
139
140 if (cpu >= nr_cpu_ids)
141 goto err_out;
142
143 /* get the cpufreq driver */
144 spin_lock_irqsave(&cpufreq_driver_lock, flags);
145
146 if (!cpufreq_driver)
147 goto err_out_unlock;
148
149 if (!try_module_get(cpufreq_driver->owner))
150 goto err_out_unlock;
151
152
153 /* get the CPU */
154 data = per_cpu(cpufreq_cpu_data, cpu);
155
156 if (!data)
157 goto err_out_put_module;
158
159 if (!kobject_get(&data->kobj))
160 goto err_out_put_module;
161
162 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
163 return data;
164
165 err_out_put_module:
166 module_put(cpufreq_driver->owner);
167 err_out_unlock:
168 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
169 err_out:
170 return NULL;
171 }
172 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
173
174
void cpufreq_cpu_put(struct cpufreq_policy *data)
176 {
177 kobject_put(&data->kobj);
178 module_put(cpufreq_driver->owner);
179 }
180 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
181
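/*
 * Expected pairing for users of the two helpers above (illustrative sketch
 * only):
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *	if (!policy)
 *		return -ENODEV;
 *	...use policy...
 *	cpufreq_cpu_put(policy);
 */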
182
183 /*********************************************************************
184 * UNIFIED DEBUG HELPERS *
185 *********************************************************************/
186 #ifdef CONFIG_CPU_FREQ_DEBUG
187
188 /* what part(s) of the CPUfreq subsystem are debugged? */
189 static unsigned int debug;
190
191 /* is the debug output ratelimit'ed using printk_ratelimit? User can
192 * set or modify this value.
193 */
194 static unsigned int debug_ratelimit = 1;
195
196 /* is the printk_ratelimit'ing enabled? It's enabled after a successful
197 * loading of a cpufreq driver, temporarily disabled when a new policy
198 * is set, and disabled upon cpufreq driver removal
199 */
200 static unsigned int disable_ratelimit = 1;
201 static DEFINE_SPINLOCK(disable_ratelimit_lock);
202
static void cpufreq_debug_enable_ratelimit(void)
204 {
205 unsigned long flags;
206
207 spin_lock_irqsave(&disable_ratelimit_lock, flags);
208 if (disable_ratelimit)
209 disable_ratelimit--;
210 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
211 }
212
static void cpufreq_debug_disable_ratelimit(void)
214 {
215 unsigned long flags;
216
217 spin_lock_irqsave(&disable_ratelimit_lock, flags);
218 disable_ratelimit++;
219 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
220 }
221
void cpufreq_debug_printk(unsigned int type, const char *prefix,
			const char *fmt, ...)
224 {
225 char s[256];
226 va_list args;
227 unsigned int len;
228 unsigned long flags;
229
230 WARN_ON(!prefix);
231 if (type & debug) {
232 spin_lock_irqsave(&disable_ratelimit_lock, flags);
233 if (!disable_ratelimit && debug_ratelimit
234 && !printk_ratelimit()) {
235 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
236 return;
237 }
238 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
239
240 len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);
241
242 va_start(args, fmt);
243 len += vsnprintf(&s[len], (256 - len), fmt, args);
244 va_end(args);
245
246 printk(s);
247
248 WARN_ON(len < 5);
249 }
250 }
251 EXPORT_SYMBOL(cpufreq_debug_printk);
252
253
254 module_param(debug, uint, 0644);
255 MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
256 " 2 to debug drivers, and 4 to debug governors.");
257
258 module_param(debug_ratelimit, uint, 0644);
259 MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
260 " set to 0 to disable ratelimiting.");
261
262 #else /* !CONFIG_CPU_FREQ_DEBUG */
263
static inline void cpufreq_debug_enable_ratelimit(void) { return; }
static inline void cpufreq_debug_disable_ratelimit(void) { return; }
266
267 #endif /* CONFIG_CPU_FREQ_DEBUG */
268
269
270 /*********************************************************************
271 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
272 *********************************************************************/
273
274 /**
275 * adjust_jiffies - adjust the system "loops_per_jiffy"
276 *
277 * This function alters the system "loops_per_jiffy" for the clock
278 * speed change. Note that loops_per_jiffy cannot be updated on SMP
279 * systems as each CPU might be scaled differently. So, use the arch
280 * per-CPU loops_per_jiffy value wherever possible.
281 */
282 #ifndef CONFIG_SMP
283 static unsigned long l_p_j_ref;
284 static unsigned int l_p_j_ref_freq;
285
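/*
 * Note: loops_per_jiffy is assumed to scale linearly with the clock;
 * cpufreq_scale(ref, ref_freq, new_freq) yields roughly ref * new_freq / ref_freq.
 */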
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
287 {
288 if (ci->flags & CPUFREQ_CONST_LOOPS)
289 return;
290
291 if (!l_p_j_ref_freq) {
292 l_p_j_ref = loops_per_jiffy;
293 l_p_j_ref_freq = ci->old;
294 dprintk("saving %lu as reference value for loops_per_jiffy; "
295 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
296 }
297 if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
298 (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
299 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
300 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
301 ci->new);
302 dprintk("scaling loops_per_jiffy to %lu "
303 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
304 }
305 }
306 #else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
308 {
309 return;
310 }
311 #endif
312
313
314 /**
315 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
316 * on frequency transition.
317 *
318 * This function calls the transition notifiers and the "adjust_jiffies"
319 * function. It is called twice on all CPU frequency changes that have
320 * external effects.
321 */
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
323 {
324 struct cpufreq_policy *policy;
325
326 BUG_ON(irqs_disabled());
327
328 freqs->flags = cpufreq_driver->flags;
329 dprintk("notification %u of frequency transition to %u kHz\n",
330 state, freqs->new);
331
332 policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
333 switch (state) {
334
335 case CPUFREQ_PRECHANGE:
336 /* detect if the driver reported a value as "old frequency"
337 * which is not equal to what the cpufreq core thinks is
338 * "old frequency".
339 */
340 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
341 if ((policy) && (policy->cpu == freqs->cpu) &&
342 (policy->cur) && (policy->cur != freqs->old)) {
343 dprintk("Warning: CPU frequency is"
344 " %u, cpufreq assumed %u kHz.\n",
345 freqs->old, policy->cur);
346 freqs->old = policy->cur;
347 }
348 }
349 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
350 CPUFREQ_PRECHANGE, freqs);
351 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
352 break;
353
354 case CPUFREQ_POSTCHANGE:
355 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
356 dprintk("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
357 (unsigned long)freqs->cpu);
358 trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
359 trace_cpu_frequency(freqs->new, freqs->cpu);
360 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
361 CPUFREQ_POSTCHANGE, freqs);
362 if (likely(policy) && likely(policy->cpu == freqs->cpu))
363 policy->cur = freqs->new;
364 break;
365 }
366 }
367 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
368
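/*
 * Driver-side usage is typically along these lines (a sketch; real drivers
 * differ in detail):
 *
 *	struct cpufreq_freqs freqs = { .cpu = cpu, .old = old, .new = new };
 *
 *	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 *	...reprogram the hardware...
 *	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 */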
369
370
371 /*********************************************************************
372 * SYSFS INTERFACE *
373 *********************************************************************/
374
static struct cpufreq_governor *__find_governor(const char *str_governor)
376 {
377 struct cpufreq_governor *t;
378
379 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
380 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
381 return t;
382
383 return NULL;
384 }
385
386 /**
387 * cpufreq_parse_governor - parse a governor string
388 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
391 {
392 int err = -EINVAL;
393
394 if (!cpufreq_driver)
395 goto out;
396
397 if (cpufreq_driver->setpolicy) {
398 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
399 *policy = CPUFREQ_POLICY_PERFORMANCE;
400 err = 0;
401 } else if (!strnicmp(str_governor, "powersave",
402 CPUFREQ_NAME_LEN)) {
403 *policy = CPUFREQ_POLICY_POWERSAVE;
404 err = 0;
405 }
406 } else if (cpufreq_driver->target) {
407 struct cpufreq_governor *t;
408
409 mutex_lock(&cpufreq_governor_mutex);
410
411 t = __find_governor(str_governor);
412
413 if (t == NULL) {
414 char *name = kasprintf(GFP_KERNEL, "cpufreq_%s",
415 str_governor);
416
417 if (name) {
418 int ret;
419
420 mutex_unlock(&cpufreq_governor_mutex);
421 ret = request_module("%s", name);
422 mutex_lock(&cpufreq_governor_mutex);
423
424 if (ret == 0)
425 t = __find_governor(str_governor);
426 }
427
428 kfree(name);
429 }
430
431 if (t != NULL) {
432 *governor = t;
433 err = 0;
434 }
435
436 mutex_unlock(&cpufreq_governor_mutex);
437 }
438 out:
439 return err;
440 }
441
442
443 /**
444 * cpufreq_per_cpu_attr_read() / show_##file_name() -
445 * print out cpufreq information
446 *
447 * Write out information from cpufreq_driver->policy[cpu]; object must be
448 * "unsigned int".
449 */
450
451 #define show_one(file_name, object) \
452 static ssize_t show_##file_name \
453 (struct cpufreq_policy *policy, char *buf) \
454 { \
455 return sprintf(buf, "%u\n", policy->object); \
456 }
457
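/*
 * For example, show_one(scaling_min_freq, min) below expands to a
 * show_scaling_min_freq() helper that prints policy->min.
 */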
458 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
459 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
460 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
461 show_one(scaling_min_freq, min);
462 show_one(scaling_max_freq, max);
463 show_one(scaling_cur_freq, cur);
464
465 static int __cpufreq_set_policy(struct cpufreq_policy *data,
466 struct cpufreq_policy *policy);
467
468 /**
469 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
470 */
471 #define store_one(file_name, object) \
472 static ssize_t store_##file_name \
473 (struct cpufreq_policy *policy, const char *buf, size_t count) \
474 { \
475 unsigned int ret = -EINVAL; \
476 struct cpufreq_policy new_policy; \
477 \
478 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
479 if (ret) \
480 return -EINVAL; \
481 \
482 ret = sscanf(buf, "%u", &new_policy.object); \
483 if (ret != 1) \
484 return -EINVAL; \
485 \
486 ret = __cpufreq_set_policy(policy, &new_policy); \
487 policy->user_policy.object = policy->object; \
488 \
489 return ret ? ret : count; \
490 }
491
492 store_one(scaling_min_freq, min);
493 store_one(scaling_max_freq, max);
494
495 /**
496 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
497 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
500 {
501 unsigned int cur_freq = __cpufreq_get(policy->cpu);
502 if (!cur_freq)
503 return sprintf(buf, "<unknown>");
504 return sprintf(buf, "%u\n", cur_freq);
505 }
506
507
508 /**
509 * show_scaling_governor - show the current policy for the specified CPU
510 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
512 {
513 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
514 return sprintf(buf, "powersave\n");
515 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
516 return sprintf(buf, "performance\n");
517 else if (policy->governor)
518 return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
519 policy->governor->name);
520 return -EINVAL;
521 }
522
523
524 /**
525 * store_scaling_governor - store policy for the specified CPU
526 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
529 {
530 unsigned int ret = -EINVAL;
531 char str_governor[16];
532 struct cpufreq_policy new_policy;
533
534 ret = cpufreq_get_policy(&new_policy, policy->cpu);
535 if (ret)
536 return ret;
537
538 ret = sscanf(buf, "%15s", str_governor);
539 if (ret != 1)
540 return -EINVAL;
541
542 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
543 &new_policy.governor))
544 return -EINVAL;
545
546 /* Do not use cpufreq_set_policy here or the user_policy.max
547 will be wrongly overridden */
548 ret = __cpufreq_set_policy(policy, &new_policy);
549
550 policy->user_policy.policy = policy->policy;
551 policy->user_policy.governor = policy->governor;
552
553 if (ret)
554 return ret;
555 else
556 return count;
557 }
558
559 /**
560 * show_scaling_driver - show the cpufreq driver currently loaded
561 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
563 {
564 return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
565 }
566
567 /**
568 * show_scaling_available_governors - show the available CPUfreq governors
569 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
572 {
573 ssize_t i = 0;
574 struct cpufreq_governor *t;
575
576 if (!cpufreq_driver->target) {
577 i += sprintf(buf, "performance powersave");
578 goto out;
579 }
580
581 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
582 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
583 - (CPUFREQ_NAME_LEN + 2)))
584 goto out;
585 i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
586 }
587 out:
588 i += sprintf(&buf[i], "\n");
589 return i;
590 }
591
static ssize_t show_cpus(const struct cpumask *mask, char *buf)
593 {
594 ssize_t i = 0;
595 unsigned int cpu;
596
597 for_each_cpu(cpu, mask) {
598 if (i)
599 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
600 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
601 if (i >= (PAGE_SIZE - 5))
602 break;
603 }
604 i += sprintf(&buf[i], "\n");
605 return i;
606 }
607
608 /**
609 * show_related_cpus - show the CPUs affected by each transition even if
610 * hw coordination is in use
611 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
613 {
614 if (cpumask_empty(policy->related_cpus))
615 return show_cpus(policy->cpus, buf);
616 return show_cpus(policy->related_cpus, buf);
617 }
618
619 /**
620 * show_affected_cpus - show the CPUs affected by each transition
621 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
623 {
624 return show_cpus(policy->cpus, buf);
625 }
626
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
629 {
630 unsigned int freq = 0;
631 unsigned int ret;
632
633 if (!policy->governor || !policy->governor->store_setspeed)
634 return -EINVAL;
635
636 ret = sscanf(buf, "%u", &freq);
637 if (ret != 1)
638 return -EINVAL;
639
640 policy->governor->store_setspeed(policy, freq);
641
642 return count;
643 }
644
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
646 {
647 if (!policy->governor || !policy->governor->show_setspeed)
648 return sprintf(buf, "<unsupported>\n");
649
650 return policy->governor->show_setspeed(policy, buf);
651 }
652
653 /**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
655 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
657 {
658 unsigned int limit;
659 int ret;
660 if (cpufreq_driver->bios_limit) {
661 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
662 if (!ret)
663 return sprintf(buf, "%u\n", limit);
664 }
665 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
666 }
667
668 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
669 cpufreq_freq_attr_ro(cpuinfo_min_freq);
670 cpufreq_freq_attr_ro(cpuinfo_max_freq);
671 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
672 cpufreq_freq_attr_ro(scaling_available_governors);
673 cpufreq_freq_attr_ro(scaling_driver);
674 cpufreq_freq_attr_ro(scaling_cur_freq);
675 cpufreq_freq_attr_ro(bios_limit);
676 cpufreq_freq_attr_ro(related_cpus);
677 cpufreq_freq_attr_ro(affected_cpus);
678 cpufreq_freq_attr_rw(scaling_min_freq);
679 cpufreq_freq_attr_rw(scaling_max_freq);
680 cpufreq_freq_attr_rw(scaling_governor);
681 cpufreq_freq_attr_rw(scaling_setspeed);
682
683 static struct attribute *default_attrs[] = {
684 &cpuinfo_min_freq.attr,
685 &cpuinfo_max_freq.attr,
686 &cpuinfo_transition_latency.attr,
687 &scaling_min_freq.attr,
688 &scaling_max_freq.attr,
689 &affected_cpus.attr,
690 &related_cpus.attr,
691 &scaling_governor.attr,
692 &scaling_driver.attr,
693 &scaling_available_governors.attr,
694 &scaling_setspeed.attr,
695 NULL
696 };
697
698 struct kobject *cpufreq_global_kobject;
699 EXPORT_SYMBOL(cpufreq_global_kobject);
700
701 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
702 #define to_attr(a) container_of(a, struct freq_attr, attr)
703
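/*
 * show()/store() are the sysfs_ops dispatchers for the per-policy kobject:
 * they take a reference on the policy, lock the policy rwsem (read for show,
 * write for store) and forward to the individual freq_attr handler.
 */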
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
705 {
706 struct cpufreq_policy *policy = to_policy(kobj);
707 struct freq_attr *fattr = to_attr(attr);
708 ssize_t ret = -EINVAL;
709 policy = cpufreq_cpu_get(policy->cpu);
710 if (!policy)
711 goto no_policy;
712
713 if (lock_policy_rwsem_read(policy->cpu) < 0)
714 goto fail;
715
716 if (fattr->show)
717 ret = fattr->show(policy, buf);
718 else
719 ret = -EIO;
720
721 unlock_policy_rwsem_read(policy->cpu);
722 fail:
723 cpufreq_cpu_put(policy);
724 no_policy:
725 return ret;
726 }
727
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
730 {
731 struct cpufreq_policy *policy = to_policy(kobj);
732 struct freq_attr *fattr = to_attr(attr);
733 ssize_t ret = -EINVAL;
734 policy = cpufreq_cpu_get(policy->cpu);
735 if (!policy)
736 goto no_policy;
737
738 if (lock_policy_rwsem_write(policy->cpu) < 0)
739 goto fail;
740
741 if (fattr->store)
742 ret = fattr->store(policy, buf, count);
743 else
744 ret = -EIO;
745
746 unlock_policy_rwsem_write(policy->cpu);
747 fail:
748 cpufreq_cpu_put(policy);
749 no_policy:
750 return ret;
751 }
752
static void cpufreq_sysfs_release(struct kobject *kobj)
754 {
755 struct cpufreq_policy *policy = to_policy(kobj);
756 dprintk("last reference is dropped\n");
757 complete(&policy->kobj_unregister);
758 }
759
760 static const struct sysfs_ops sysfs_ops = {
761 .show = show,
762 .store = store,
763 };
764
765 static struct kobj_type ktype_cpufreq = {
766 .sysfs_ops = &sysfs_ops,
767 .default_attrs = default_attrs,
768 .release = cpufreq_sysfs_release,
769 };
770
771 /*
772 * Returns:
773 * Negative: Failure
774 * 0: Success
775 * Positive: When we have a managed CPU and the sysfs got symlinked
776 */
static int cpufreq_add_dev_policy(unsigned int cpu,
				  struct cpufreq_policy *policy,
				  struct sys_device *sys_dev)
780 {
781 int ret = 0;
782 #ifdef CONFIG_SMP
783 unsigned long flags;
784 unsigned int j;
785 #ifdef CONFIG_HOTPLUG_CPU
786 struct cpufreq_governor *gov;
787
788 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
789 if (gov) {
790 policy->governor = gov;
791 dprintk("Restoring governor %s for cpu %d\n",
792 policy->governor->name, cpu);
793 }
794 #endif
795
796 for_each_cpu(j, policy->cpus) {
797 struct cpufreq_policy *managed_policy;
798
799 if (cpu == j)
800 continue;
801
802 /* Check for existing affected CPUs.
803 * They may not be aware of it due to CPU Hotplug.
804 * cpufreq_cpu_put is called when the device is removed
805 * in __cpufreq_remove_dev()
806 */
807 managed_policy = cpufreq_cpu_get(j);
808 if (unlikely(managed_policy)) {
809
810 /* Set proper policy_cpu */
811 unlock_policy_rwsem_write(cpu);
812 per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
813
814 if (lock_policy_rwsem_write(cpu) < 0) {
815 /* Should not go through policy unlock path */
816 if (cpufreq_driver->exit)
817 cpufreq_driver->exit(policy);
818 cpufreq_cpu_put(managed_policy);
819 return -EBUSY;
820 }
821
822 spin_lock_irqsave(&cpufreq_driver_lock, flags);
823 cpumask_copy(managed_policy->cpus, policy->cpus);
824 per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
825 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
826
827 dprintk("CPU already managed, adding link\n");
828 ret = sysfs_create_link(&sys_dev->kobj,
829 &managed_policy->kobj,
830 "cpufreq");
831 if (ret)
832 cpufreq_cpu_put(managed_policy);
833 /*
834 * Success. We only needed to be added to the mask.
835 * Call driver->exit() because only the cpu parent of
836 * the kobj needed to call init().
837 */
838 if (cpufreq_driver->exit)
839 cpufreq_driver->exit(policy);
840
841 if (!ret)
842 return 1;
843 else
844 return ret;
845 }
846 }
847 #endif
848 return ret;
849 }
850
851
852 /* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				   struct cpufreq_policy *policy)
855 {
856 unsigned int j;
857 int ret = 0;
858
859 for_each_cpu(j, policy->cpus) {
860 struct cpufreq_policy *managed_policy;
861 struct sys_device *cpu_sys_dev;
862
863 if (j == cpu)
864 continue;
865 if (!cpu_online(j))
866 continue;
867
868 dprintk("CPU %u already managed, adding link\n", j);
869 managed_policy = cpufreq_cpu_get(cpu);
870 cpu_sys_dev = get_cpu_sysdev(j);
871 ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
872 "cpufreq");
873 if (ret) {
874 cpufreq_cpu_put(managed_policy);
875 return ret;
876 }
877 }
878 return ret;
879 }
880
static int cpufreq_add_dev_interface(unsigned int cpu,
				     struct cpufreq_policy *policy,
				     struct sys_device *sys_dev)
884 {
885 struct cpufreq_policy new_policy;
886 struct freq_attr **drv_attr;
887 unsigned long flags;
888 int ret = 0;
889 unsigned int j;
890
891 /* prepare interface data */
892 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
893 &sys_dev->kobj, "cpufreq");
894 if (ret)
895 return ret;
896
897 /* set up files for this cpu device */
898 drv_attr = cpufreq_driver->attr;
899 while ((drv_attr) && (*drv_attr)) {
900 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
901 if (ret)
902 goto err_out_kobj_put;
903 drv_attr++;
904 }
905 if (cpufreq_driver->get) {
906 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
907 if (ret)
908 goto err_out_kobj_put;
909 }
910 if (cpufreq_driver->target) {
911 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
912 if (ret)
913 goto err_out_kobj_put;
914 }
915 if (cpufreq_driver->bios_limit) {
916 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
917 if (ret)
918 goto err_out_kobj_put;
919 }
920
921 spin_lock_irqsave(&cpufreq_driver_lock, flags);
922 for_each_cpu(j, policy->cpus) {
923 if (!cpu_online(j))
924 continue;
925 per_cpu(cpufreq_cpu_data, j) = policy;
926 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
927 }
928 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
929
930 ret = cpufreq_add_dev_symlink(cpu, policy);
931 if (ret)
932 goto err_out_kobj_put;
933
934 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
935 /* assure that the starting sequence is run in __cpufreq_set_policy */
936 policy->governor = NULL;
937
938 /* set default policy */
939 ret = __cpufreq_set_policy(policy, &new_policy);
940 policy->user_policy.policy = policy->policy;
941 policy->user_policy.governor = policy->governor;
942
943 if (ret) {
944 dprintk("setting policy failed\n");
945 if (cpufreq_driver->exit)
946 cpufreq_driver->exit(policy);
947 }
948 return ret;
949
950 err_out_kobj_put:
951 kobject_put(&policy->kobj);
952 wait_for_completion(&policy->kobj_unregister);
953 return ret;
954 }
955
956
957 /**
958 * cpufreq_add_dev - add a CPU device
959 *
960 * Adds the cpufreq interface for a CPU device.
961 *
962 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with cpu hotplugging and all hell will break loose. Tried to clean this
964 * mess up, but more thorough testing is needed. - Mathieu
965 */
static int cpufreq_add_dev(struct sys_device *sys_dev)
967 {
968 unsigned int cpu = sys_dev->id;
969 int ret = 0, found = 0;
970 struct cpufreq_policy *policy;
971 unsigned long flags;
972 unsigned int j;
973 #ifdef CONFIG_HOTPLUG_CPU
974 int sibling;
975 #endif
976
977 if (cpu_is_offline(cpu))
978 return 0;
979
980 cpufreq_debug_disable_ratelimit();
981 dprintk("adding CPU %u\n", cpu);
982
983 #ifdef CONFIG_SMP
984 /* check whether a different CPU already registered this
985 * CPU because it is in the same boat. */
986 policy = cpufreq_cpu_get(cpu);
987 if (unlikely(policy)) {
988 cpufreq_cpu_put(policy);
989 cpufreq_debug_enable_ratelimit();
990 return 0;
991 }
992 #endif
993
994 if (!try_module_get(cpufreq_driver->owner)) {
995 ret = -EINVAL;
996 goto module_out;
997 }
998
999 ret = -ENOMEM;
1000 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
1001 if (!policy)
1002 goto nomem_out;
1003
1004 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1005 goto err_free_policy;
1006
1007 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1008 goto err_free_cpumask;
1009
1010 policy->cpu = cpu;
1011 cpumask_copy(policy->cpus, cpumask_of(cpu));
1012
1013 /* Initially set CPU itself as the policy_cpu */
1014 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
1015 ret = (lock_policy_rwsem_write(cpu) < 0);
1016 WARN_ON(ret);
1017
1018 init_completion(&policy->kobj_unregister);
1019 INIT_WORK(&policy->update, handle_update);
1020
1021 /* Set governor before ->init, so that driver could check it */
1022 #ifdef CONFIG_HOTPLUG_CPU
1023 for_each_online_cpu(sibling) {
1024 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
1025 if (cp && cp->governor &&
1026 (cpumask_test_cpu(cpu, cp->related_cpus))) {
1027 policy->governor = cp->governor;
1028 found = 1;
1029 break;
1030 }
1031 }
1032 #endif
1033 if (!found)
1034 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	/* call driver. From then on the cpufreq driver must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
1038 ret = cpufreq_driver->init(policy);
1039 if (ret) {
1040 dprintk("initialization failed\n");
1041 goto err_unlock_policy;
1042 }
1043 policy->user_policy.min = policy->min;
1044 policy->user_policy.max = policy->max;
1045
1046 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1047 CPUFREQ_START, policy);
1048
1049 ret = cpufreq_add_dev_policy(cpu, policy, sys_dev);
1050 if (ret) {
1051 if (ret > 0)
1052 /* This is a managed cpu, symlink created,
1053 exit with 0 */
1054 ret = 0;
1055 goto err_unlock_policy;
1056 }
1057
1058 ret = cpufreq_add_dev_interface(cpu, policy, sys_dev);
1059 if (ret)
1060 goto err_out_unregister;
1061
1062 unlock_policy_rwsem_write(cpu);
1063
1064 kobject_uevent(&policy->kobj, KOBJ_ADD);
1065 module_put(cpufreq_driver->owner);
1066 dprintk("initialization complete\n");
1067 cpufreq_debug_enable_ratelimit();
1068
1069 return 0;
1070
1071
1072 err_out_unregister:
1073 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1074 for_each_cpu(j, policy->cpus)
1075 per_cpu(cpufreq_cpu_data, j) = NULL;
1076 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1077
1078 kobject_put(&policy->kobj);
1079 wait_for_completion(&policy->kobj_unregister);
1080
1081 err_unlock_policy:
1082 unlock_policy_rwsem_write(cpu);
1083 free_cpumask_var(policy->related_cpus);
1084 err_free_cpumask:
1085 free_cpumask_var(policy->cpus);
1086 err_free_policy:
1087 kfree(policy);
1088 nomem_out:
1089 module_put(cpufreq_driver->owner);
1090 module_out:
1091 cpufreq_debug_enable_ratelimit();
1092 return ret;
1093 }
1094
1095
1096 /**
1097 * __cpufreq_remove_dev - remove a CPU device
1098 *
1099 * Removes the cpufreq interface for a CPU device.
1100 * Caller should already have policy_rwsem in write mode for this CPU.
1101 * This routine frees the rwsem before returning.
1102 */
static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1104 {
1105 unsigned int cpu = sys_dev->id;
1106 unsigned long flags;
1107 struct cpufreq_policy *data;
1108 struct kobject *kobj;
1109 struct completion *cmp;
1110 #ifdef CONFIG_SMP
1111 struct sys_device *cpu_sys_dev;
1112 unsigned int j;
1113 #endif
1114
1115 cpufreq_debug_disable_ratelimit();
1116 dprintk("unregistering CPU %u\n", cpu);
1117
1118 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1119 data = per_cpu(cpufreq_cpu_data, cpu);
1120
1121 if (!data) {
1122 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1123 cpufreq_debug_enable_ratelimit();
1124 unlock_policy_rwsem_write(cpu);
1125 return -EINVAL;
1126 }
1127 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1128
1129
1130 #ifdef CONFIG_SMP
1131 /* if this isn't the CPU which is the parent of the kobj, we
1132 * only need to unlink, put and exit
1133 */
1134 if (unlikely(cpu != data->cpu)) {
1135 dprintk("removing link\n");
1136 cpumask_clear_cpu(cpu, data->cpus);
1137 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1138 kobj = &sys_dev->kobj;
1139 cpufreq_cpu_put(data);
1140 cpufreq_debug_enable_ratelimit();
1141 unlock_policy_rwsem_write(cpu);
1142 sysfs_remove_link(kobj, "cpufreq");
1143 return 0;
1144 }
1145 #endif
1146
1147 #ifdef CONFIG_SMP
1148
1149 #ifdef CONFIG_HOTPLUG_CPU
1150 strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
1151 CPUFREQ_NAME_LEN);
1152 #endif
1153
1154 /* if we have other CPUs still registered, we need to unlink them,
1155 * or else wait_for_completion below will lock up. Clean the
1156 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
1157 * the sysfs links afterwards.
1158 */
1159 if (unlikely(cpumask_weight(data->cpus) > 1)) {
1160 for_each_cpu(j, data->cpus) {
1161 if (j == cpu)
1162 continue;
1163 per_cpu(cpufreq_cpu_data, j) = NULL;
1164 }
1165 }
1166
1167 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1168
1169 if (unlikely(cpumask_weight(data->cpus) > 1)) {
1170 for_each_cpu(j, data->cpus) {
1171 if (j == cpu)
1172 continue;
1173 dprintk("removing link for cpu %u\n", j);
1174 #ifdef CONFIG_HOTPLUG_CPU
1175 strncpy(per_cpu(cpufreq_cpu_governor, j),
1176 data->governor->name, CPUFREQ_NAME_LEN);
1177 #endif
1178 cpu_sys_dev = get_cpu_sysdev(j);
1179 kobj = &cpu_sys_dev->kobj;
1180 unlock_policy_rwsem_write(cpu);
1181 sysfs_remove_link(kobj, "cpufreq");
1182 lock_policy_rwsem_write(cpu);
1183 cpufreq_cpu_put(data);
1184 }
1185 }
1186 #else
1187 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1188 #endif
1189
1190 if (cpufreq_driver->target)
1191 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1192
1193 kobj = &data->kobj;
1194 cmp = &data->kobj_unregister;
1195 unlock_policy_rwsem_write(cpu);
1196 kobject_put(kobj);
1197
1198 /* we need to make sure that the underlying kobj is actually
1199 * not referenced anymore by anybody before we proceed with
1200 * unloading.
1201 */
1202 dprintk("waiting for dropping of refcount\n");
1203 wait_for_completion(cmp);
1204 dprintk("wait complete\n");
1205
1206 lock_policy_rwsem_write(cpu);
1207 if (cpufreq_driver->exit)
1208 cpufreq_driver->exit(data);
1209 unlock_policy_rwsem_write(cpu);
1210
1211 free_cpumask_var(data->related_cpus);
1212 free_cpumask_var(data->cpus);
1213 kfree(data);
1214 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1215
1216 cpufreq_debug_enable_ratelimit();
1217 return 0;
1218 }
1219
1220
static int cpufreq_remove_dev(struct sys_device *sys_dev)
1222 {
1223 unsigned int cpu = sys_dev->id;
1224 int retval;
1225
1226 if (cpu_is_offline(cpu))
1227 return 0;
1228
1229 if (unlikely(lock_policy_rwsem_write(cpu)))
1230 BUG();
1231
1232 retval = __cpufreq_remove_dev(sys_dev);
1233 return retval;
1234 }
1235
1236
static void handle_update(struct work_struct *work)
1238 {
1239 struct cpufreq_policy *policy =
1240 container_of(work, struct cpufreq_policy, update);
1241 unsigned int cpu = policy->cpu;
1242 dprintk("handle_update for cpu %u called\n", cpu);
1243 cpufreq_update_policy(cpu);
1244 }
1245
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequencies differ, we're in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to the current frequency first, and need to clean up later.
 * So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
1257 {
1258 struct cpufreq_freqs freqs;
1259
1260 dprintk("Warning: CPU frequency out of sync: cpufreq and timing "
1261 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1262
1263 freqs.cpu = cpu;
1264 freqs.old = old_freq;
1265 freqs.new = new_freq;
1266 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
1267 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
1268 }
1269
1270
1271 /**
1272 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1273 * @cpu: CPU number
1274 *
1275 * This is the last known freq, without actually getting it from the driver.
1276 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1277 */
unsigned int cpufreq_quick_get(unsigned int cpu)
1279 {
1280 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1281 unsigned int ret_freq = 0;
1282
1283 if (policy) {
1284 ret_freq = policy->cur;
1285 cpufreq_cpu_put(policy);
1286 }
1287
1288 return ret_freq;
1289 }
1290 EXPORT_SYMBOL(cpufreq_quick_get);
1291
1292
static unsigned int __cpufreq_get(unsigned int cpu)
1294 {
1295 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1296 unsigned int ret_freq = 0;
1297
1298 if (!cpufreq_driver->get)
1299 return ret_freq;
1300
1301 ret_freq = cpufreq_driver->get(cpu);
1302
1303 if (ret_freq && policy->cur &&
1304 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1305 /* verify no discrepancy between actual and
1306 saved value exists */
1307 if (unlikely(ret_freq != policy->cur)) {
1308 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1309 schedule_work(&policy->update);
1310 }
1311 }
1312
1313 return ret_freq;
1314 }
1315
1316 /**
1317 * cpufreq_get - get the current CPU frequency (in kHz)
1318 * @cpu: CPU number
1319 *
 * Get the current (static) CPU frequency
1321 */
unsigned int cpufreq_get(unsigned int cpu)
1323 {
1324 unsigned int ret_freq = 0;
1325 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1326
1327 if (!policy)
1328 goto out;
1329
1330 if (unlikely(lock_policy_rwsem_read(cpu)))
1331 goto out_policy;
1332
1333 ret_freq = __cpufreq_get(cpu);
1334
1335 unlock_policy_rwsem_read(cpu);
1336
1337 out_policy:
1338 cpufreq_cpu_put(policy);
1339 out:
1340 return ret_freq;
1341 }
1342 EXPORT_SYMBOL(cpufreq_get);
1343
1344 static struct sysdev_driver cpufreq_sysdev_driver = {
1345 .add = cpufreq_add_dev,
1346 .remove = cpufreq_remove_dev,
1347 };
1348
1349
1350 /**
1351 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1352 *
1353 * This function is only executed for the boot processor. The other CPUs
1354 * have been put offline by means of CPU hotplug.
1355 */
static int cpufreq_bp_suspend(void)
1357 {
1358 int ret = 0;
1359
1360 int cpu = smp_processor_id();
1361 struct cpufreq_policy *cpu_policy;
1362
1363 dprintk("suspending cpu %u\n", cpu);
1364
1365 /* If there's no policy for the boot CPU, we have nothing to do. */
1366 cpu_policy = cpufreq_cpu_get(cpu);
1367 if (!cpu_policy)
1368 return 0;
1369
1370 if (cpufreq_driver->suspend) {
1371 ret = cpufreq_driver->suspend(cpu_policy);
1372 if (ret)
1373 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1374 "step on CPU %u\n", cpu_policy->cpu);
1375 }
1376
1377 cpufreq_cpu_put(cpu_policy);
1378 return ret;
1379 }
1380
1381 /**
1382 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1383 *
1384 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 * 2.) schedule a call to cpufreq_update_policy() ASAP as interrupts are
 *     restored. It will verify that the current freq is in sync with
 *     what we believe it to be. This is a bit later than when it
 *     should be, but nonetheless it's better than calling
 *     cpufreq_driver->get() here which might re-enable interrupts...
1390 *
1391 * This function is only executed for the boot CPU. The other CPUs have not
1392 * been turned on yet.
1393 */
static void cpufreq_bp_resume(void)
1395 {
1396 int ret = 0;
1397
1398 int cpu = smp_processor_id();
1399 struct cpufreq_policy *cpu_policy;
1400
1401 dprintk("resuming cpu %u\n", cpu);
1402
1403 /* If there's no policy for the boot CPU, we have nothing to do. */
1404 cpu_policy = cpufreq_cpu_get(cpu);
1405 if (!cpu_policy)
1406 return;
1407
1408 if (cpufreq_driver->resume) {
1409 ret = cpufreq_driver->resume(cpu_policy);
1410 if (ret) {
1411 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1412 "step on CPU %u\n", cpu_policy->cpu);
1413 goto fail;
1414 }
1415 }
1416
1417 schedule_work(&cpu_policy->update);
1418
1419 fail:
1420 cpufreq_cpu_put(cpu_policy);
1421 }
1422
1423 static struct syscore_ops cpufreq_syscore_ops = {
1424 .suspend = cpufreq_bp_suspend,
1425 .resume = cpufreq_bp_resume,
1426 };
1427
1428
1429 /*********************************************************************
1430 * NOTIFIER LISTS INTERFACE *
1431 *********************************************************************/
1432
1433 /**
1434 * cpufreq_register_notifier - register a driver with cpufreq
1435 * @nb: notifier function to register
1436 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1437 *
1438 * Add a driver to one of two lists: either a list of drivers that
1439 * are notified about clock rate changes (once before and once after
1440 * the transition), or a list of drivers that are notified about
1441 * changes in cpufreq policy.
1442 *
1443 * This function may sleep, and has the same return conditions as
1444 * blocking_notifier_chain_register.
1445 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1447 {
1448 int ret;
1449
1450 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1451
1452 switch (list) {
1453 case CPUFREQ_TRANSITION_NOTIFIER:
1454 ret = srcu_notifier_chain_register(
1455 &cpufreq_transition_notifier_list, nb);
1456 break;
1457 case CPUFREQ_POLICY_NOTIFIER:
1458 ret = blocking_notifier_chain_register(
1459 &cpufreq_policy_notifier_list, nb);
1460 break;
1461 default:
1462 ret = -EINVAL;
1463 }
1464
1465 return ret;
1466 }
1467 EXPORT_SYMBOL(cpufreq_register_notifier);
1468
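/*
 * Client code typically registers a transition notifier along these lines
 * (a sketch; my_transition and my_nb are made-up names):
 *
 *	static int my_transition(struct notifier_block *nb,
 *				 unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *		...react to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE in val...
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_nb = { .notifier_call = my_transition };
 *
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */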
1469
1470 /**
1471 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1472 * @nb: notifier block to be unregistered
1473 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1474 *
1475 * Remove a driver from the CPU frequency notifier list.
1476 *
1477 * This function may sleep, and has the same return conditions as
1478 * blocking_notifier_chain_unregister.
1479 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1481 {
1482 int ret;
1483
1484 switch (list) {
1485 case CPUFREQ_TRANSITION_NOTIFIER:
1486 ret = srcu_notifier_chain_unregister(
1487 &cpufreq_transition_notifier_list, nb);
1488 break;
1489 case CPUFREQ_POLICY_NOTIFIER:
1490 ret = blocking_notifier_chain_unregister(
1491 &cpufreq_policy_notifier_list, nb);
1492 break;
1493 default:
1494 ret = -EINVAL;
1495 }
1496
1497 return ret;
1498 }
1499 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1500
1501
1502 /*********************************************************************
1503 * GOVERNORS *
1504 *********************************************************************/
1505
1506
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
1510 {
1511 int retval = -EINVAL;
1512
1513 dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
1514 target_freq, relation);
1515 if (cpu_online(policy->cpu) && cpufreq_driver->target)
1516 retval = cpufreq_driver->target(policy, target_freq, relation);
1517
1518 return retval;
1519 }
1520 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1521
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
1525 {
1526 int ret = -EINVAL;
1527
1528 policy = cpufreq_cpu_get(policy->cpu);
1529 if (!policy)
1530 goto no_policy;
1531
1532 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
1533 goto fail;
1534
1535 ret = __cpufreq_driver_target(policy, target_freq, relation);
1536
1537 unlock_policy_rwsem_write(policy->cpu);
1538
1539 fail:
1540 cpufreq_cpu_put(policy);
1541 no_policy:
1542 return ret;
1543 }
1544 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1545
int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
1547 {
1548 int ret = 0;
1549
1550 policy = cpufreq_cpu_get(policy->cpu);
1551 if (!policy)
1552 return -EINVAL;
1553
1554 if (cpu_online(cpu) && cpufreq_driver->getavg)
1555 ret = cpufreq_driver->getavg(policy, cpu);
1556
1557 cpufreq_cpu_put(policy);
1558 return ret;
1559 }
1560 EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
1561
/*
 * __cpufreq_governor - forward a governor event (CPUFREQ_GOV_START,
 * CPUFREQ_GOV_STOP or CPUFREQ_GOV_LIMITS) to the policy's governor.
 */
1565
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
1568 {
1569 int ret;
1570
	/* A fallback governor must only be defined when the default governor
	   is known to have latency restrictions (e.g. conservative or
	   ondemand); Kconfig already ensures that this is the case.
	 */
1575 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1576 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1577 #else
1578 struct cpufreq_governor *gov = NULL;
1579 #endif
1580
1581 if (policy->governor->max_transition_latency &&
1582 policy->cpuinfo.transition_latency >
1583 policy->governor->max_transition_latency) {
1584 if (!gov)
1585 return -EINVAL;
1586 else {
1587 printk(KERN_WARNING "%s governor failed, too long"
1588 " transition latency of HW, fallback"
1589 " to %s governor\n",
1590 policy->governor->name,
1591 gov->name);
1592 policy->governor = gov;
1593 }
1594 }
1595
1596 if (!try_module_get(policy->governor->owner))
1597 return -EINVAL;
1598
1599 dprintk("__cpufreq_governor for CPU %u, event %u\n",
1600 policy->cpu, event);
1601 ret = policy->governor->governor(policy, event);
1602
	/* we keep one module reference alive for
	   each CPU governed by this governor */
1605 if ((event != CPUFREQ_GOV_START) || ret)
1606 module_put(policy->governor->owner);
1607 if ((event == CPUFREQ_GOV_STOP) && !ret)
1608 module_put(policy->governor->owner);
1609
1610 return ret;
1611 }
1612
1613
int cpufreq_register_governor(struct cpufreq_governor *governor)
1615 {
1616 int err;
1617
1618 if (!governor)
1619 return -EINVAL;
1620
1621 mutex_lock(&cpufreq_governor_mutex);
1622
1623 err = -EBUSY;
1624 if (__find_governor(governor->name) == NULL) {
1625 err = 0;
1626 list_add(&governor->governor_list, &cpufreq_governor_list);
1627 }
1628
1629 mutex_unlock(&cpufreq_governor_mutex);
1630 return err;
1631 }
1632 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1633
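/*
 * A governor module typically registers itself like this (a sketch;
 * "mygov" and my_gov_handler are made-up names):
 *
 *	static struct cpufreq_governor my_gov = {
 *		.name		= "mygov",
 *		.governor	= my_gov_handler,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	cpufreq_register_governor(&my_gov);
 */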
1634
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1636 {
1637 #ifdef CONFIG_HOTPLUG_CPU
1638 int cpu;
1639 #endif
1640
1641 if (!governor)
1642 return;
1643
1644 #ifdef CONFIG_HOTPLUG_CPU
1645 for_each_present_cpu(cpu) {
1646 if (cpu_online(cpu))
1647 continue;
1648 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1649 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1650 }
1651 #endif
1652
1653 mutex_lock(&cpufreq_governor_mutex);
1654 list_del(&governor->governor_list);
1655 mutex_unlock(&cpufreq_governor_mutex);
1656 return;
1657 }
1658 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1659
1660
1661
1662 /*********************************************************************
1663 * POLICY INTERFACE *
1664 *********************************************************************/
1665
1666 /**
1667 * cpufreq_get_policy - get the current cpufreq_policy
1668 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1669 * is written
1670 *
1671 * Reads the current cpufreq policy.
1672 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1674 {
1675 struct cpufreq_policy *cpu_policy;
1676 if (!policy)
1677 return -EINVAL;
1678
1679 cpu_policy = cpufreq_cpu_get(cpu);
1680 if (!cpu_policy)
1681 return -EINVAL;
1682
1683 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1684
1685 cpufreq_cpu_put(cpu_policy);
1686 return 0;
1687 }
1688 EXPORT_SYMBOL(cpufreq_get_policy);
1689
1690
1691 /*
1692 * data : current policy.
1693 * policy : policy to be set.
1694 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
1697 {
1698 int ret = 0;
1699
1700 cpufreq_debug_disable_ratelimit();
1701 dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1702 policy->min, policy->max);
1703
1704 memcpy(&policy->cpuinfo, &data->cpuinfo,
1705 sizeof(struct cpufreq_cpuinfo));
1706
1707 if (policy->min > data->max || policy->max < data->min) {
1708 ret = -EINVAL;
1709 goto error_out;
1710 }
1711
1712 /* verify the cpu speed can be set within this limit */
1713 ret = cpufreq_driver->verify(policy);
1714 if (ret)
1715 goto error_out;
1716
1717 /* adjust if necessary - all reasons */
1718 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1719 CPUFREQ_ADJUST, policy);
1720
1721 /* adjust if necessary - hardware incompatibility*/
1722 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1723 CPUFREQ_INCOMPATIBLE, policy);
1724
1725 /* verify the cpu speed can be set within this limit,
1726 which might be different to the first one */
1727 ret = cpufreq_driver->verify(policy);
1728 if (ret)
1729 goto error_out;
1730
1731 /* notification of the new policy */
1732 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1733 CPUFREQ_NOTIFY, policy);
1734
1735 data->min = policy->min;
1736 data->max = policy->max;
1737
1738 dprintk("new min and max freqs are %u - %u kHz\n",
1739 data->min, data->max);
1740
1741 if (cpufreq_driver->setpolicy) {
1742 data->policy = policy->policy;
1743 dprintk("setting range\n");
1744 ret = cpufreq_driver->setpolicy(policy);
1745 } else {
1746 if (policy->governor != data->governor) {
1747 /* save old, working values */
1748 struct cpufreq_governor *old_gov = data->governor;
1749
1750 dprintk("governor switch\n");
1751
1752 /* end old governor */
1753 if (data->governor)
1754 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1755
1756 /* start new governor */
1757 data->governor = policy->governor;
1758 if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1759 /* new governor failed, so re-start old one */
1760 dprintk("starting governor %s failed\n",
1761 data->governor->name);
1762 if (old_gov) {
1763 data->governor = old_gov;
1764 __cpufreq_governor(data,
1765 CPUFREQ_GOV_START);
1766 }
1767 ret = -EINVAL;
1768 goto error_out;
1769 }
1770 /* might be a policy change, too, so fall through */
1771 }
1772 dprintk("governor: change or update limits\n");
1773 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1774 }
1775
1776 error_out:
1777 cpufreq_debug_enable_ratelimit();
1778 return ret;
1779 }
1780
1781 /**
1782 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1783 * @cpu: CPU which shall be re-evaluated
1784 *
1785 * Useful for policy notifiers which have different necessities
1786 * at different times.
1787 */
int cpufreq_update_policy(unsigned int cpu)
1789 {
1790 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1791 struct cpufreq_policy policy;
1792 int ret;
1793
1794 if (!data) {
1795 ret = -ENODEV;
1796 goto no_policy;
1797 }
1798
1799 if (unlikely(lock_policy_rwsem_write(cpu))) {
1800 ret = -EINVAL;
1801 goto fail;
1802 }
1803
1804 dprintk("updating policy for CPU %u\n", cpu);
1805 memcpy(&policy, data, sizeof(struct cpufreq_policy));
1806 policy.min = data->user_policy.min;
1807 policy.max = data->user_policy.max;
1808 policy.policy = data->user_policy.policy;
1809 policy.governor = data->user_policy.governor;
1810
1811 /* BIOS might change freq behind our back
1812 -> ask driver for current freq and notify governors about a change */
1813 if (cpufreq_driver->get) {
1814 policy.cur = cpufreq_driver->get(cpu);
1815 if (!data->cur) {
1816 dprintk("Driver did not initialize current freq");
1817 data->cur = policy.cur;
1818 } else {
1819 if (data->cur != policy.cur)
1820 cpufreq_out_of_sync(cpu, data->cur,
1821 policy.cur);
1822 }
1823 }
1824
1825 ret = __cpufreq_set_policy(data, &policy);
1826
1827 unlock_policy_rwsem_write(cpu);
1828
1829 fail:
1830 cpufreq_cpu_put(data);
1831 no_policy:
1832 return ret;
1833 }
1834 EXPORT_SYMBOL(cpufreq_update_policy);
1835
static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
1838 {
1839 unsigned int cpu = (unsigned long)hcpu;
1840 struct sys_device *sys_dev;
1841
1842 sys_dev = get_cpu_sysdev(cpu);
1843 if (sys_dev) {
1844 switch (action) {
1845 case CPU_ONLINE:
1846 case CPU_ONLINE_FROZEN:
1847 cpufreq_add_dev(sys_dev);
1848 break;
1849 case CPU_DOWN_PREPARE:
1850 case CPU_DOWN_PREPARE_FROZEN:
1851 if (unlikely(lock_policy_rwsem_write(cpu)))
1852 BUG();
1853
1854 __cpufreq_remove_dev(sys_dev);
1855 break;
1856 case CPU_DOWN_FAILED:
1857 case CPU_DOWN_FAILED_FROZEN:
1858 cpufreq_add_dev(sys_dev);
1859 break;
1860 }
1861 }
1862 return NOTIFY_OK;
1863 }
1864
1865 static struct notifier_block __refdata cpufreq_cpu_notifier = {
1866 .notifier_call = cpufreq_cpu_callback,
1867 };
1868
1869 /*********************************************************************
1870 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1871 *********************************************************************/
1872
1873 /**
1874 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
1876 * submitted by the CPU Frequency driver.
1877 *
1878 * Registers a CPU Frequency driver to this core code. This code
1879 * returns zero on success, -EBUSY when another driver got here first
1880 * (and isn't unregistered in the meantime).
1881 *
1882 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1884 {
1885 unsigned long flags;
1886 int ret;
1887
1888 if (!driver_data || !driver_data->verify || !driver_data->init ||
1889 ((!driver_data->setpolicy) && (!driver_data->target)))
1890 return -EINVAL;
1891
1892 dprintk("trying to register driver %s\n", driver_data->name);
1893
1894 if (driver_data->setpolicy)
1895 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1896
1897 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1898 if (cpufreq_driver) {
1899 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1900 return -EBUSY;
1901 }
1902 cpufreq_driver = driver_data;
1903 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1904
1905 ret = sysdev_driver_register(&cpu_sysdev_class,
1906 &cpufreq_sysdev_driver);
1907 if (ret)
1908 goto err_null_driver;
1909
1910 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1911 int i;
1912 ret = -ENODEV;
1913
1914 /* check for at least one working CPU */
1915 for (i = 0; i < nr_cpu_ids; i++)
1916 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1917 ret = 0;
1918 break;
1919 }
1920
1921 /* if all ->init() calls failed, unregister */
1922 if (ret) {
1923 dprintk("no CPU initialized for driver %s\n",
1924 driver_data->name);
1925 goto err_sysdev_unreg;
1926 }
1927 }
1928
1929 register_hotcpu_notifier(&cpufreq_cpu_notifier);
1930 dprintk("driver %s up and running\n", driver_data->name);
1931 cpufreq_debug_enable_ratelimit();
1932
1933 return 0;
1934 err_sysdev_unreg:
1935 sysdev_driver_unregister(&cpu_sysdev_class,
1936 &cpufreq_sysdev_driver);
1937 err_null_driver:
1938 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1939 cpufreq_driver = NULL;
1940 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1941 return ret;
1942 }
1943 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1944
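/*
 * A platform/arch driver typically registers itself along these lines
 * (a sketch; the my_* names are made up, the fields mirror the checks in
 * cpufreq_register_driver() above):
 *
 *	static struct cpufreq_driver my_driver = {
 *		.name	= "my-cpufreq",
 *		.init	= my_cpu_init,
 *		.verify	= my_verify,
 *		.target	= my_target,
 *		.get	= my_get,
 *	};
 *
 *	ret = cpufreq_register_driver(&my_driver);
 */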
1945
1946 /**
1947 * cpufreq_unregister_driver - unregister the current CPUFreq driver
1948 *
1949 * Unregister the current CPUFreq driver. Only call this if you have
1950 * the right to do so, i.e. if you have succeeded in initialising before!
1951 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1952 * currently not initialised.
1953 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1955 {
1956 unsigned long flags;
1957
1958 cpufreq_debug_disable_ratelimit();
1959
1960 if (!cpufreq_driver || (driver != cpufreq_driver)) {
1961 cpufreq_debug_enable_ratelimit();
1962 return -EINVAL;
1963 }
1964
1965 dprintk("unregistering driver %s\n", driver->name);
1966
1967 sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
1968 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1969
1970 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1971 cpufreq_driver = NULL;
1972 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1973
1974 return 0;
1975 }
1976 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
1977
static int __init cpufreq_core_init(void)
1979 {
1980 int cpu;
1981
1982 for_each_possible_cpu(cpu) {
1983 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1984 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
1985 }
1986
1987 cpufreq_global_kobject = kobject_create_and_add("cpufreq",
1988 &cpu_sysdev_class.kset.kobj);
1989 BUG_ON(!cpufreq_global_kobject);
1990 register_syscore_ops(&cpufreq_syscore_ops);
1991
1992 return 0;
1993 }
1994 core_initcall(cpufreq_core_init);
1995