/*
 * processor_thermal.c - Passive cooling submodule of the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *  			- Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/sysdev.h>

#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <acpi/acpi_drivers.h>

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_thermal");

#ifdef CONFIG_CPU_FREQ

/* If a passive cooling situation is detected, primarily CPUfreq is used, as it
 * offers (in most cases) voltage scaling in addition to frequency scaling, and
 * thus a cubic (instead of linear) reduction of energy. Also, we allow for
 * _any_ cpufreq driver and not only the acpi-cpufreq driver.
 */

#define CPUFREQ_THERMAL_MIN_STEP 0
#define CPUFREQ_THERMAL_MAX_STEP 3

static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
static unsigned int acpi_thermal_cpufreq_is_init = 0;

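/* Return 1 when cpufreq is initialized and a policy exists for this CPU. */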
static int cpu_has_cpufreq(unsigned int cpu)
{
	struct cpufreq_policy policy;
	if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu))
		return 0;
	return 1;
}

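/*
 * Policy notifier: when a cpufreq policy is re-evaluated (CPUFREQ_ADJUST),
 * clamp its maximum frequency according to the current per-CPU reduction
 * step.  Each step removes 20% of cpuinfo.max_freq, so the deepest step
 * (3) limits the CPU to 40% of its maximum frequency.
 */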
static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned long max_freq = 0;

	if (event != CPUFREQ_ADJUST)
		goto out;

	max_freq = (
	    policy->cpuinfo.max_freq *
	    (100 - per_cpu(cpufreq_thermal_reduction_pctg, policy->cpu) * 20)
	) / 100;

	cpufreq_verify_within_limits(policy, 0, max_freq);

out:
	return 0;
}

static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
	.notifier_call = acpi_thermal_cpufreq_notifier,
};

static int cpufreq_get_max_state(unsigned int cpu)
{
	if (!cpu_has_cpufreq(cpu))
		return 0;

	return CPUFREQ_THERMAL_MAX_STEP;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
	if (!cpu_has_cpufreq(cpu))
		return 0;

	return per_cpu(cpufreq_thermal_reduction_pctg, cpu);
}

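/*
 * Record the requested reduction step for this CPU and force a policy
 * update so the notifier above applies the new frequency cap.
 */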
static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
	if (!cpu_has_cpufreq(cpu))
		return 0;

	per_cpu(cpufreq_thermal_reduction_pctg, cpu) = state;
	cpufreq_update_policy(cpu);
	return 0;
}

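/*
 * Clear the reduction step for every present CPU and register the policy
 * notifier; cpufreq-based passive cooling is only used once registration
 * has succeeded.
 */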
void acpi_thermal_cpufreq_init(void)
{
	int i;

	for (i = 0; i < nr_cpu_ids; i++)
		if (cpu_present(i))
			per_cpu(cpufreq_thermal_reduction_pctg, i) = 0;

	i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
				      CPUFREQ_POLICY_NOTIFIER);
	if (!i)
		acpi_thermal_cpufreq_is_init = 1;
}

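/* Unregister the policy notifier and disable cpufreq-based cooling. */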
void acpi_thermal_cpufreq_exit(void)
{
	if (acpi_thermal_cpufreq_is_init)
		cpufreq_unregister_notifier
		    (&acpi_thermal_cpufreq_notifier_block,
		     CPUFREQ_POLICY_NOTIFIER);

	acpi_thermal_cpufreq_is_init = 0;
}

#else				/* ! CONFIG_CPU_FREQ */
static int cpufreq_get_max_state(unsigned int cpu)
{
	return 0;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
	return 0;
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
	return 0;
}

#endif

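/*
 * The processor can be limited (passively cooled) through this interface
 * whenever T-state throttling is supported.
 */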
int acpi_processor_get_limit_info(struct acpi_processor *pr)
{

	if (!pr)
		return -EINVAL;

	if (pr->flags.throttling)
		pr->flags.limit = 1;

	return 0;
}

/* thermal cooling device callbacks */
static int acpi_processor_max_state(struct acpi_processor *pr)
{
	int max_state = 0;

	/*
	 * There are four cpufreq cooling states (0..3), tracked in
	 * cpufreq_thermal_reduction_pctg.
	 */
	max_state += cpufreq_get_max_state(pr->id);
	if (pr->flags.throttling)
		max_state += (pr->throttling.state_count - 1);

	return max_state;
}
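/*
 * get_max_state() callback: the highest cooling state is the number of
 * cpufreq reduction steps plus the number of additional throttling
 * (T-)states.
 */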
static int
processor_get_max_state(struct thermal_cooling_device *cdev,
			unsigned long *state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	*state = acpi_processor_max_state(pr);
	return 0;
}

static int
processor_get_cur_state(struct thermal_cooling_device *cdev,
			unsigned long *cur_state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	*cur_state = cpufreq_get_cur_state(pr->id);
	if (pr->flags.throttling)
		*cur_state += pr->throttling.state;
	return 0;
}

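/*
 * set_cur_state() callback: cooling states 0..max_pstate are mapped to
 * cpufreq frequency reduction; states above that add T-state throttling
 * on top of the deepest cpufreq reduction.
 */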
static int
processor_set_cur_state(struct thermal_cooling_device *cdev,
			unsigned long state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;
	int result = 0;
	int max_pstate;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	max_pstate = cpufreq_get_max_state(pr->id);

	if (state > acpi_processor_max_state(pr))
		return -EINVAL;

	if (state <= max_pstate) {
		if (pr->flags.throttling && pr->throttling.state)
			result = acpi_processor_set_throttling(pr, 0, false);
		cpufreq_set_cur_state(pr->id, state);
	} else {
		cpufreq_set_cur_state(pr->id, max_pstate);
		result = acpi_processor_set_throttling(pr,
				state - max_pstate, false);
	}
	return result;
}

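/*
 * Cooling device operations handed to the thermal framework; the ACPI
 * processor driver registers them with thermal_cooling_device_register()
 * when a processor device is added.
 */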
struct thermal_cooling_device_ops processor_cooling_ops = {
	.get_max_state = processor_get_max_state,
	.get_cur_state = processor_get_cur_state,
	.set_cur_state = processor_set_cur_state,
};