// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *  			- Added processor hotplug support
 *  Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *  			- Added support for C3 on SMP
 */
#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h>       /* need_resched() */
#include <linux/sort.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/minmax.h>
#include <linux/perf_event.h>
#include <acpi/processor.h>
#include <linux/context_tracking.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#include <asm/cpu.h>
#endif

#define ACPI_IDLE_STATE_START	(IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0400);
static bool nocst __read_mostly;
module_param(nocst, bool, 0400);
static bool bm_check_disable __read_mostly;
module_param(bm_check_disable, bool, 0400);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
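
/*
 * Usage note (a sketch, not exhaustive): these are module parameters of the
 * "processor" module, so they are typically set on the kernel command line,
 * e.g.
 *
 *	processor.max_cstate=1 processor.nocst=1
 *
 * latency_factor has 0644 permissions, so it should also be adjustable at
 * run time via /sys/module/processor/parameters/latency_factor (assuming
 * the usual module_param sysfs exposure).
 */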

static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
static
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);

static int disabled_by_idle_boot_param(void)
{
	return boot_option_idle_override == IDLE_POLL ||
		boot_option_idle_override == IDLE_HALT;
}

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	pr_notice("%s detected - limiting to C%ld max_cstate."
		  " Override with \"processor.max_cstate=%d\"\n", id->ident,
		  (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

static const struct dmi_system_id processor_power_dmi_table[] = {
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{ set_max_cstate, "Pavilion zv5000", {
	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	  DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
	 (void *)1},
	{ set_max_cstate, "Asus L8400B", {
	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
	  DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
	 (void *)1},
	{},
};

/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void __cpuidle acpi_safe_halt(void)
{
	if (!tif_need_resched()) {
		safe_halt();
		local_irq_disable();
	}
}
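
/*
 * A note on the ordering above (descriptive only, not a behavior change):
 * on x86, safe_halt() is the classic STI;HLT sequence, so interrupts are
 * re-enabled and the CPU halts atomically with respect to a pending wakeup.
 * The local_irq_disable() afterwards simply restores the interrupts-off
 * state that the caller contract above promises.
 */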

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
		type = ACPI_STATE_C1;

	/*
	 * Check if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void __lapic_timer_propagate_broadcast(void *arg)
{
	struct acpi_processor *pr = (struct acpi_processor *) arg;

	if (pr->power.timer_broadcast_on_state < INT_MAX)
		tick_broadcast_enable();
	else
		tick_broadcast_disable();
}

static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
				 (void *)pr, 1);
}

/* Power(C) State timer broadcast control */
static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx)
{
	return cx - pr->power.states >= pr->power.timer_broadcast_on_state;
}

#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }

static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx)
{
	return false;
}

#endif

#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return;
		fallthrough;
	default:
		/* TSC could halt in idle, so notify users */
		if (state > ACPI_STATE_C1)
			mark_tsc_unstable("TSC halts in idle");
	}
}
#else
static void tsc_check_state(int state) { }
#endif

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;

	/*
	 * FADT specified C2 latency must be less than or equal to
	 * 100 microseconds.
	 */
	if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		acpi_handle_debug(pr->handle, "C2 latency too large [%d]\n",
				  acpi_gbl_FADT.c2_latency);
		/* invalidate C2 */
		pr->power.states[ACPI_STATE_C2].address = 0;
	}

	/*
	 * FADT supplied C3 latency must be less than or equal to
	 * 1000 microseconds.
	 */
	if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		acpi_handle_debug(pr->handle, "C3 latency too large [%d]\n",
				  acpi_gbl_FADT.c3_latency);
		/* invalidate C3 */
		pr->power.states[ACPI_STATE_C3].address = 0;
	}

	acpi_handle_debug(pr->handle, "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address);

	snprintf(pr->power.states[ACPI_STATE_C2].desc,
			 ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x",
			 pr->power.states[ACPI_STATE_C2].address);
	snprintf(pr->power.states[ACPI_STATE_C3].desc,
			 ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
			 pr->power.states[ACPI_STATE_C3].address);

	return 0;
}
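
/*
 * Background on the pblk offsets above (per the ACPI spec's processor
 * register block layout): P_BLK starts with the 4-byte P_CNT register,
 * so P_LVL2 lives at offset 4 and P_LVL3 at offset 5. Reading those
 * byte-wide I/O ports is what actually enters C2/C3 on FADT-style
 * systems.
 */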

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;

		snprintf(pr->power.states[ACPI_STATE_C1].desc,
			 ACPI_CX_DESC_LEN, "ACPI HLT");
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	int ret;

	if (nocst)
		return -ENODEV;

	ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power);
	if (ret)
		return ret;

	if (!pr->power.count)
		return -EFAULT;

	pr->flags.has_cst = 1;
	return 0;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag = -1;
	static int bm_control_flag = -1;

	if (!cx->address)
		return;

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		acpi_handle_debug(pr->handle,
				  "C3 not supported on PIIX4 with Type-F DMA\n");
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (bm_check_flag == -1) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
		bm_control_flag = pr->flags.bm_control;
	} else {
		pr->flags.bm_check = bm_check_flag;
		pr->flags.bm_control = bm_control_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				acpi_handle_debug(pr->handle,
						  "C3 support requires BM control\n");
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				acpi_handle_debug(pr->handle,
						  "C3 support without BM control\n");
			}
		}
	} else {
		/*
		 * The WBINVD flag should be set in the FADT for C3 to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			acpi_handle_debug(pr->handle,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n");
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3.  Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
}

static int acpi_cst_latency_cmp(const void *a, const void *b)
{
	const struct acpi_processor_cx *x = a, *y = b;

	if (!(x->valid && y->valid))
		return 0;
	if (x->latency > y->latency)
		return 1;
	if (x->latency < y->latency)
		return -1;
	return 0;
}

static void acpi_cst_latency_swap(void *a, void *b, int n)
{
	struct acpi_processor_cx *x = a, *y = b;

	if (!(x->valid && y->valid))
		return;
	swap(x->latency, y->latency);
}
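
/*
 * A note on the comparator/swap pair above: entries where either state is
 * invalid compare as equal and are never swapped, and the swap helper
 * exchanges only the latency fields. The net effect is that sort() below
 * reorders just the latency values among the valid C-states while leaving
 * everything else in place.
 */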

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;
	unsigned int last_latency = 0;
	unsigned int last_type = 0;
	bool buggy_latency = false;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			if (!cx->address)
				break;
			cx->valid = 1;
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}
		if (!cx->valid)
			continue;
		if (cx->type >= last_type && cx->latency < last_latency)
			buggy_latency = true;
		last_latency = cx->latency;
		last_type = cx->type;

		lapic_timer_check_state(i, pr, cx);
		tsc_check_state(cx->type);
		working++;
	}

	if (buggy_latency) {
		pr_notice("FW issue: working around C-state latencies out of order\n");
		sort(&pr->power.states[1], max_cstate,
		     sizeof(struct acpi_processor_cx),
		     acpi_cst_latency_cmp,
		     acpi_cst_latency_swap);
	}

	lapic_timer_propagate_broadcast(pr);

	return working;
}

static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/*
	 * NOTE: the idle thread may not be running while calling
	 * this function
	 */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			pr->flags.power = 1;
		}
	}

	return 0;
}

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	if (bm_check_disable)
		return 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

static void wait_for_freeze(void)
{
#ifdef	CONFIG_X86
	/* No delay is needed if we are in a guest */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;
	/*
	 * Modern (>=Nehalem) Intel systems use ACPI via intel_idle,
	 * not this code.  Assume that any Intel systems using this
	 * are ancient and may need the dummy wait.  This also assumes
	 * that the motivating chipset issue was Intel-only.
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;
#endif
	/*
	 * Dummy wait op - must do something useless after P_LVL2 read
	 * because chipsets cannot guarantee that STPCLK# signal gets
	 * asserted in time to freeze execution properly
	 *
	 * This workaround has been in place since the original ACPI
	 * implementation was merged, circa 2002.
	 *
	 * If a profile is pointing to this instruction, please first
	 * consider moving your system to a more modern idle
	 * mechanism.
	 */
	inl(acpi_gbl_FADT.xpm_timer_block.address);
}

/**
 * acpi_idle_do_entry - enter idle state using the appropriate method
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	perf_lopwr_cb(true);

	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		/* IO port based C-state */
		inb(cx->address);
		wait_for_freeze();
	}

	perf_lopwr_cb(false);
}

/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	ACPI_FLUSH_CPU_CACHE();

	while (1) {

		if (cx->entry_method == ACPI_CSTATE_HALT)
			safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			inb(cx->address);
			wait_for_freeze();
		} else
			return -ENODEV;

#if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU)
		cond_wakeup_cpu0();
#endif
	}

	/* Never reached */
	return 0;
}

static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
}
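
/*
 * Rationale (as the checks above encode it): when CPU hotplug is enabled,
 * the C-state data came from the FADT rather than _CST, and the FADT does
 * not advertise C2 as MP-safe, deeper states cannot be trusted once more
 * than one CPU is online, so acpi_idle_enter() demotes to C1 in that case.
 */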

static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @drv: cpuidle driver
 * @pr: Target processor
 * @cx: Target state context
 * @index: index of target state
 */
static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
			       struct acpi_processor *pr,
			       struct acpi_processor_cx *cx,
			       int index)
{
	static struct acpi_processor_cx safe_cx = {
		.entry_method = ACPI_CSTATE_HALT,
	};

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is not set.
	 * In that case we cannot do much, we enter C3 without doing anything.
	 */
	bool dis_bm = pr->flags.bm_control;

	/* If we can skip BM, demote to a safe state. */
	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		dis_bm = false;
		index = drv->safe_state_index;
		if (index >= 0) {
			cx = this_cpu_read(acpi_cstate[index]);
		} else {
			cx = &safe_cx;
			index = -EBUSY;
		}
	}

	if (dis_bm) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	}

	ct_idle_enter();

	acpi_idle_do_entry(cx);

	ct_idle_exit();

	/* Re-enable bus master arbitration */
	if (dis_bm) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	return index;
}
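
/*
 * The c3_lock/c3_cpu_count pairing above acts as a simple barrier: bus
 * master arbitration is disabled (ARB_DIS=1) only once the last online CPU
 * has entered C3, and each CPU clears ARB_DIS on the way out before
 * decrementing the count, so DMA arbitration is never left off while any
 * CPU is executing.
 */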

static int __cpuidle acpi_idle_enter(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
	struct acpi_processor *pr;

	pr = __this_cpu_read(processors);
	if (unlikely(!pr))
		return -EINVAL;

	if (cx->type != ACPI_STATE_C1) {
		if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check)
			return acpi_idle_enter_bm(drv, pr, cx, index);

		/* C2 to C1 demotion. */
		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
			index = ACPI_IDLE_STATE_START;
			cx = per_cpu(acpi_cstate[index], dev->cpu);
		}
	}

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	acpi_idle_do_entry(cx);

	return index;
}

static int __cpuidle acpi_idle_enter_s2idle(struct cpuidle_device *dev,
				  struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	if (cx->type == ACPI_STATE_C3) {
		struct acpi_processor *pr = __this_cpu_read(processors);

		if (unlikely(!pr))
			return 0;

		if (pr->flags.bm_check) {
			u8 bm_sts_skip = cx->bm_sts_skip;

			/* Don't check BM_STS, do an unconditional ARB_DIS for S2IDLE */
			cx->bm_sts_skip = 1;
			acpi_idle_enter_bm(drv, pr, cx, index);
			cx->bm_sts_skip = bm_sts_skip;

			return 0;
		} else {
			ACPI_FLUSH_CPU_CACHE();
		}
	}
	acpi_idle_do_entry(cx);

	return 0;
}

static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	int i, count = ACPI_IDLE_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		state = &acpi_idle_driver.states[count];
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		per_cpu(acpi_cstate[count], dev->cpu) = cx;

		if (lapic_timer_needs_broadcast(pr, cx))
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;

		if (cx->type == ACPI_STATE_C3) {
			state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
			if (pr->flags.bm_check)
				state->flags |= CPUIDLE_FLAG_RCU_IDLE;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	if (!count)
		return -EINVAL;

	return 0;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	int i, count;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (max_cstate == 0)
		max_cstate = 1;

	if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
		cpuidle_poll_state_init(drv);
		count = 1;
	} else {
		count = 0;
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		state = &drv->states[count];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->enter = acpi_idle_enter;

		state->flags = 0;
		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 ||
		    cx->type == ACPI_STATE_C3) {
			state->enter_dead = acpi_idle_play_dead;
			if (cx->type != ACPI_STATE_C3)
				drv->safe_state_index = count;
		}
		/*
		 * Halt-induced C1 is not good for ->enter_s2idle, because it
		 * re-enables interrupts on exit.  Moreover, C1 is generally not
		 * particularly interesting from the suspend-to-idle angle, so
		 * avoid C1 and the situations in which we may need to fall back
		 * to it altogether.
		 */
		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
			state->enter_s2idle = acpi_idle_enter_s2idle;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	drv->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}
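
/*
 * A worked example of the heuristic above (numbers are illustrative only):
 * with the default latency_factor of 2, a C-state whose FADT/_CST exit
 * latency is 100 us gets target_residency = 100 * 2 = 200 us, i.e. the
 * cpuidle governor will only pick that state when it expects to stay idle
 * for at least twice the exit latency.
 */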

static inline void acpi_processor_cstate_first_run_checks(void)
{
	static int first_run;

	if (first_run)
		return;
	dmi_check_system(processor_power_dmi_table);
	max_cstate = acpi_processor_cstate_check(max_cstate);
	if (max_cstate < ACPI_C_STATES_MAX)
		pr_notice("processor limited to max C-state %d\n", max_cstate);

	first_run++;

	if (nocst)
		return;

	acpi_processor_claim_cst_control();
}
#else

static inline int disabled_by_idle_boot_param(void) { return 0; }
static inline void acpi_processor_cstate_first_run_checks(void) { }
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	return -ENODEV;
}

static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	return -EINVAL;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	return -EINVAL;
}

#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */

struct acpi_lpi_states_array {
	unsigned int size;
	unsigned int composite_states_size;
	struct acpi_lpi_state *entries;
	struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER];
};

static int obj_get_integer(union acpi_object *obj, u32 *value)
{
	if (obj->type != ACPI_TYPE_INTEGER)
		return -EINVAL;

	*value = obj->integer.value;
	return 0;
}

static int acpi_processor_evaluate_lpi(acpi_handle handle,
				       struct acpi_lpi_states_array *info)
{
	acpi_status status;
	int ret = 0;
	int pkg_count, state_idx = 1, loop;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *lpi_data;
	struct acpi_lpi_state *lpi_state;

	status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		acpi_handle_debug(handle, "No _LPI, giving up\n");
		return -ENODEV;
	}

	lpi_data = buffer.pointer;

	/* There must be at least 4 elements = 3 elements + 1 package */
	if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE ||
	    lpi_data->package.count < 4) {
		pr_debug("not enough elements in _LPI\n");
		ret = -ENODATA;
		goto end;
	}

	pkg_count = lpi_data->package.elements[2].integer.value;

	/* Validate number of power states. */
	if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) {
		pr_debug("count given by _LPI is not valid\n");
		ret = -ENODATA;
		goto end;
	}

	lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL);
	if (!lpi_state) {
		ret = -ENOMEM;
		goto end;
	}

	info->size = pkg_count;
	info->entries = lpi_state;

	/* LPI States start at index 3 */
	for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) {
		union acpi_object *element, *pkg_elem, *obj;

		element = &lpi_data->package.elements[loop];
		if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7)
			continue;

		pkg_elem = element->package.elements;

		obj = pkg_elem + 6;
		if (obj->type == ACPI_TYPE_BUFFER) {
			struct acpi_power_register *reg;

			reg = (struct acpi_power_register *)obj->buffer.pointer;
			if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
			    reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)
				continue;

			lpi_state->address = reg->address;
			lpi_state->entry_method =
				reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ?
				ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO;
		} else if (obj->type == ACPI_TYPE_INTEGER) {
			lpi_state->entry_method = ACPI_CSTATE_INTEGER;
			lpi_state->address = obj->integer.value;
		} else {
			continue;
		}

		/* elements[7,8] skipped for now i.e. Residency/Usage counter */

		obj = pkg_elem + 9;
		if (obj->type == ACPI_TYPE_STRING)
			strscpy(lpi_state->desc, obj->string.pointer,
				ACPI_CX_DESC_LEN);

		lpi_state->index = state_idx;
		if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) {
			pr_debug("No min. residency found, assuming 10 us\n");
			lpi_state->min_residency = 10;
		}

		if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) {
			pr_debug("No wake latency found, assuming 10 us\n");
			lpi_state->wake_latency = 10;
		}

		if (obj_get_integer(pkg_elem + 2, &lpi_state->flags))
			lpi_state->flags = 0;

		if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags))
			lpi_state->arch_flags = 0;

		if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq))
			lpi_state->res_cnt_freq = 1;

		if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state))
			lpi_state->enable_parent_state = 0;
	}

	acpi_handle_debug(handle, "Found %d power states\n", state_idx);
end:
	kfree(buffer.pointer);
	return ret;
}
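
/*
 * For reference, the per-state sub-package layout assumed above follows the
 * ACPI _LPI definition: element 0 is min residency, 1 is worst-case wake
 * latency, 2 is flags, 3 is arch context-loss flags, 4 is the residency
 * counter frequency, 5 is the enabled parent state, 6 is the entry-method
 * register (or an integer for "integer" entry), 7 and 8 are the residency
 * and usage counter registers, and 9 is the state name string.
 */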

/*
 * flat_state_cnt - the number of composite LPI states after the process of flattening
 */
static int flat_state_cnt;

/**
 * combine_lpi_states - combine local and parent LPI states to form a composite LPI state
 *
 * @local: local LPI state
 * @parent: parent LPI state
 * @result: composite LPI state
 */
static bool combine_lpi_states(struct acpi_lpi_state *local,
			       struct acpi_lpi_state *parent,
			       struct acpi_lpi_state *result)
{
	if (parent->entry_method == ACPI_CSTATE_INTEGER) {
		if (!parent->address) /* 0 means autopromotable */
			return false;
		result->address = local->address + parent->address;
	} else {
		result->address = parent->address;
	}

	result->min_residency = max(local->min_residency, parent->min_residency);
	result->wake_latency = local->wake_latency + parent->wake_latency;
	result->enable_parent_state = parent->enable_parent_state;
	result->entry_method = local->entry_method;

	result->flags = parent->flags;
	result->arch_flags = parent->arch_flags;
	result->index = parent->index;

	strscpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
	strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
	strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
	return true;
}
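
/*
 * A small worked example (values are illustrative only): combining a CPU
 * state with min_residency = 100 us / wake_latency = 5 us and a cluster
 * state with 500 us / 20 us yields a composite with min_residency =
 * max(100, 500) = 500 us and wake_latency = 5 + 20 = 25 us, described as
 * "<cpu-state>+<cluster-state>".
 */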

#define ACPI_LPI_STATE_FLAGS_ENABLED			BIT(0)

static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
				  struct acpi_lpi_state *t)
{
	curr_level->composite_states[curr_level->composite_states_size++] = t;
}

static int flatten_lpi_states(struct acpi_processor *pr,
			      struct acpi_lpi_states_array *curr_level,
			      struct acpi_lpi_states_array *prev_level)
{
	int i, j, state_count = curr_level->size;
	struct acpi_lpi_state *p, *t = curr_level->entries;

	curr_level->composite_states_size = 0;
	for (j = 0; j < state_count; j++, t++) {
		struct acpi_lpi_state *flpi;

		if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED))
			continue;

		if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) {
			pr_warn("Limiting number of LPI states to max (%d)\n",
				ACPI_PROCESSOR_MAX_POWER);
			pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}

		flpi = &pr->power.lpi_states[flat_state_cnt];

		if (!prev_level) { /* leaf/processor node */
			memcpy(flpi, t, sizeof(*t));
			stash_composite_state(curr_level, flpi);
			flat_state_cnt++;
			continue;
		}

		for (i = 0; i < prev_level->composite_states_size; i++) {
			p = prev_level->composite_states[i];
			if (t->index <= p->enable_parent_state &&
			    combine_lpi_states(p, t, flpi)) {
				stash_composite_state(curr_level, flpi);
				flat_state_cnt++;
				flpi++;
			}
		}
	}

	kfree(curr_level->entries);
	return 0;
}

int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
	return -EOPNOTSUPP;
}

static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
{
	int ret, i;
	acpi_status status;
	acpi_handle handle = pr->handle, pr_ahandle;
	struct acpi_device *d = NULL;
	struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;

	/* make sure our architecture has support */
	ret = acpi_processor_ffh_lpi_probe(pr->id);
	if (ret == -EOPNOTSUPP)
		return ret;

	if (!osc_pc_lpi_support_confirmed)
		return -EOPNOTSUPP;

	if (!acpi_has_method(handle, "_LPI"))
		return -EINVAL;

	flat_state_cnt = 0;
	prev = &info[0];
	curr = &info[1];
	handle = pr->handle;
	ret = acpi_processor_evaluate_lpi(handle, prev);
	if (ret)
		return ret;
	flatten_lpi_states(pr, prev, NULL);

	status = acpi_get_parent(handle, &pr_ahandle);
	while (ACPI_SUCCESS(status)) {
		d = acpi_fetch_acpi_dev(pr_ahandle);
		if (!d)
			break;

		handle = pr_ahandle;

		if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
			break;

		/* can be optional? */
		if (!acpi_has_method(handle, "_LPI"))
			break;

		ret = acpi_processor_evaluate_lpi(handle, curr);
		if (ret)
			break;

		/* flatten all the LPI states in this level of hierarchy */
		flatten_lpi_states(pr, curr, prev);

		tmp = prev, prev = curr, curr = tmp;

		status = acpi_get_parent(handle, &pr_ahandle);
	}

	pr->power.count = flat_state_cnt;
	/* reset the index after flattening */
	for (i = 0; i < pr->power.count; i++)
		pr->power.lpi_states[i].index = i;

	/* Tell driver that _LPI is supported. */
	pr->flags.has_lpi = 1;
	pr->flags.power = 1;

	return 0;
}

int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
	return -ENODEV;
}

/**
 * acpi_idle_lpi_enter - enters an ACPI LPI state
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * Return: 0 for success or negative value for error
 */
static int acpi_idle_lpi_enter(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_lpi_state *lpi;

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	lpi = &pr->power.lpi_states[index];
	if (lpi->entry_method == ACPI_CSTATE_FFH)
		return acpi_processor_ffh_lpi_enter(lpi);

	return -EINVAL;
}

static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
{
	int i;
	struct acpi_lpi_state *lpi;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.has_lpi)
		return -EOPNOTSUPP;

	for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
		lpi = &pr->power.lpi_states[i];

		state = &drv->states[i];
		snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
		strscpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = lpi->wake_latency;
		state->target_residency = lpi->min_residency;
		if (lpi->arch_flags)
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;
		state->enter = acpi_idle_lpi_enter;
		drv->safe_state_index = i;
	}

	drv->state_count = i;

	return 0;
}
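
/*
 * Note that drv->safe_state_index is assigned on every iteration of the
 * loop above, so after the loop it points at the last (deepest) LPI state
 * that was registered.
 */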

/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
	int i;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.power_setup_done || !pr->flags.power)
		return -EINVAL;

	drv->safe_state_index = -1;
	for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
		drv->states[i].name[0] = '\0';
		drv->states[i].desc[0] = '\0';
	}

	if (pr->flags.has_lpi)
		return acpi_processor_setup_lpi_states(pr);

	return acpi_processor_setup_cstates(pr);
}

/**
 * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE
 * device i.e. per-cpu data
 *
 * @pr: the ACPI processor
 * @dev: the cpuidle device
 */
static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
					    struct cpuidle_device *dev)
{
	if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
		return -EINVAL;

	dev->cpu = pr->id;
	if (pr->flags.has_lpi)
		return acpi_processor_ffh_lpi_probe(pr->id);

	return acpi_processor_setup_cpuidle_cx(pr, dev);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	int ret;

	ret = acpi_processor_get_lpi_info(pr);
	if (ret)
		ret = acpi_processor_get_cstate_info(pr);

	return ret;
}
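
/*
 * Discovery order above: _LPI is preferred, and only if it is unsupported
 * or fails does the driver fall back to the legacy _CST/FADT C-state path.
 */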

int acpi_processor_hotplug(struct acpi_processor *pr)
{
	int ret = 0;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	dev = per_cpu(acpi_cpuidle_device, pr->id);
	cpuidle_pause_and_lock();
	cpuidle_disable_device(dev);
	ret = acpi_processor_get_power_info(pr);
	if (!ret && pr->flags.power) {
		acpi_processor_setup_cpuidle_dev(pr, dev);
		ret = cpuidle_enable_device(dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * FIXME:  Design the ACPI notification to make it once per
	 * system instead of once per-cpu.  This condition is a hack
	 * to make the code that updates C-States be called once.
	 */

	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {

		/* Protect against cpu-hotplug */
		cpus_read_lock();
		cpuidle_pause_and_lock();

		/* Disable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			dev = per_cpu(acpi_cpuidle_device, cpu);
			cpuidle_disable_device(dev);
		}

		/* Populate Updated C-state information */
		acpi_processor_get_power_info(pr);
		acpi_processor_setup_cpuidle_states(pr);

		/* Enable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				dev = per_cpu(acpi_cpuidle_device, cpu);
				acpi_processor_setup_cpuidle_dev(_pr, dev);
				cpuidle_enable_device(dev);
			}
		}
		cpuidle_resume_and_unlock();
		cpus_read_unlock();
	}

	return 0;
}

static int acpi_processor_registered;

int acpi_processor_power_init(struct acpi_processor *pr)
{
	int retval;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	acpi_processor_cstate_first_run_checks();

	if (!acpi_processor_get_power_info(pr))
		pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if (pr->flags.power) {
		/* Register acpi_idle_driver if not already registered */
		if (!acpi_processor_registered) {
			acpi_processor_setup_cpuidle_states(pr);
			retval = cpuidle_register_driver(&acpi_idle_driver);
			if (retval)
				return retval;
			pr_debug("%s registered with cpuidle\n",
				 acpi_idle_driver.name);
		}

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;
		per_cpu(acpi_cpuidle_device, pr->id) = dev;

		acpi_processor_setup_cpuidle_dev(pr, dev);

		/*
		 * Register per-cpu cpuidle_device. The cpuidle driver
		 * must already be registered before registering the device.
		 */
		retval = cpuidle_register_device(dev);
		if (retval) {
			if (acpi_processor_registered == 0)
				cpuidle_unregister_driver(&acpi_idle_driver);
			return retval;
		}
		acpi_processor_registered++;
	}
	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr)
{
	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

	if (disabled_by_idle_boot_param())
		return 0;

	if (pr->flags.power) {
		cpuidle_unregister_device(dev);
		acpi_processor_registered--;
		if (acpi_processor_registered == 0)
			cpuidle_unregister_driver(&acpi_idle_driver);
	}

	pr->flags.power_setup_done = 0;
	return 0;
}