Lines matching refs:coupled in drivers/cpuidle/coupled.c
151 int n = dev->coupled->online_count; in cpuidle_coupled_parallel_barrier()
206 static inline void cpuidle_coupled_set_ready(struct cpuidle_coupled *coupled) in cpuidle_coupled_set_ready() argument
208 atomic_add(MAX_WAITING_CPUS, &coupled->ready_waiting_counts); in cpuidle_coupled_set_ready()
226 inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled) in cpuidle_coupled_set_not_ready() argument
231 all = coupled->online_count | (coupled->online_count << WAITING_BITS); in cpuidle_coupled_set_not_ready()
232 ret = atomic_add_unless(&coupled->ready_waiting_counts, in cpuidle_coupled_set_not_ready()
244 static inline int cpuidle_coupled_no_cpus_ready(struct cpuidle_coupled *coupled) in cpuidle_coupled_no_cpus_ready() argument
246 int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS; in cpuidle_coupled_no_cpus_ready()
256 static inline bool cpuidle_coupled_cpus_ready(struct cpuidle_coupled *coupled) in cpuidle_coupled_cpus_ready() argument
258 int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS; in cpuidle_coupled_cpus_ready()
259 return r == coupled->online_count; in cpuidle_coupled_cpus_ready()
268 static inline bool cpuidle_coupled_cpus_waiting(struct cpuidle_coupled *coupled) in cpuidle_coupled_cpus_waiting() argument
270 int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK; in cpuidle_coupled_cpus_waiting()
271 return w == coupled->online_count; in cpuidle_coupled_cpus_waiting()
280 static inline int cpuidle_coupled_no_cpus_waiting(struct cpuidle_coupled *coupled) in cpuidle_coupled_no_cpus_waiting() argument
282 int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK; in cpuidle_coupled_no_cpus_waiting()
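The helpers above all decode one packed atomic field: ready_waiting_counts keeps the number of waiting CPUs in its low WAITING_BITS bits and the number of ready CPUs above them, so a single word can be compared against online_count for both questions. Below is a minimal userspace model of that encoding and the read-side predicates; the constants mirror WAITING_BITS, WAITING_MASK and MAX_WAITING_CPUS, while struct coupled_model and the helper names are illustrative assumptions, not the kernel code.

/* Illustrative model of the packed ready/waiting counter (not kernel code). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define WAITING_BITS     16
#define MAX_WAITING_CPUS (1 << WAITING_BITS)
#define WAITING_MASK     (MAX_WAITING_CPUS - 1)

struct coupled_model {
        atomic_int ready_waiting_counts;   /* ready count << WAITING_BITS | waiting count */
        int online_count;                  /* online CPUs in the coupled set */
};

static int waiting_count(struct coupled_model *c)
{
        return atomic_load(&c->ready_waiting_counts) & WAITING_MASK;
}

static int ready_count(struct coupled_model *c)
{
        return atomic_load(&c->ready_waiting_counts) >> WAITING_BITS;
}

static bool all_cpus_waiting(struct coupled_model *c) { return waiting_count(c) == c->online_count; }
static bool no_cpus_waiting(struct coupled_model *c)  { return waiting_count(c) == 0; }
static bool all_cpus_ready(struct coupled_model *c)   { return ready_count(c) == c->online_count; }
static bool no_cpus_ready(struct coupled_model *c)    { return ready_count(c) == 0; }

int main(void)
{
        struct coupled_model c = { .online_count = 2 };

        atomic_fetch_add(&c.ready_waiting_counts, 1);                 /* CPU 0 starts waiting */
        atomic_fetch_add(&c.ready_waiting_counts, 1);                 /* CPU 1 starts waiting */
        atomic_fetch_add(&c.ready_waiting_counts, MAX_WAITING_CPUS);  /* CPU 0 becomes ready */

        printf("waiting=%d ready=%d all_waiting=%d none_waiting=%d all_ready=%d none_ready=%d\n",
               waiting_count(&c), ready_count(&c),
               all_cpus_waiting(&c), no_cpus_waiting(&c),
               all_cpus_ready(&c), no_cpus_ready(&c));
        return 0;
}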
294 struct cpuidle_coupled *coupled) in cpuidle_coupled_get_state() argument
306 for_each_cpu(i, &coupled->coupled_cpus) in cpuidle_coupled_get_state()
307 if (cpu_online(i) && coupled->requested_state[i] < state) in cpuidle_coupled_get_state()
308 state = coupled->requested_state[i]; in cpuidle_coupled_get_state()
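cpuidle_coupled_get_state() resolves what the whole set may enter: the shallowest (lowest-numbered) state requested by any online CPU in the coupled mask, since the group can only go as deep as its most restrictive member allows. A small sketch of that reduction with the cpumask walk replaced by a plain array; requested_state and online here are stand-ins for the kernel structures, not the real API.

#include <limits.h>
#include <stdbool.h>

/* Deepest state every online member of the set has agreed to: the minimum
 * of the individually requested states. */
static int coupled_get_state(const int *requested_state, const bool *online,
                             int ncpus)
{
        int state = INT_MAX;

        for (int i = 0; i < ncpus; i++)
                if (online[i] && requested_state[i] < state)
                        state = requested_state[i];

        return state;
}

int main(void)
{
        int requested[3] = { 2, 1, 3 };
        bool online[3]   = { true, true, true };

        return coupled_get_state(requested, online, 3) == 1 ? 0 : 1;
}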
348 struct cpuidle_coupled *coupled) in cpuidle_coupled_poke_others() argument
352 for_each_cpu(cpu, &coupled->coupled_cpus) in cpuidle_coupled_poke_others()
367 struct cpuidle_coupled *coupled, int next_state) in cpuidle_coupled_set_waiting() argument
369 coupled->requested_state[cpu] = next_state; in cpuidle_coupled_set_waiting()
375 return atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK; in cpuidle_coupled_set_waiting()
386 struct cpuidle_coupled *coupled) in cpuidle_coupled_set_not_waiting() argument
394 atomic_dec(&coupled->ready_waiting_counts); in cpuidle_coupled_set_not_waiting()
396 coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE; in cpuidle_coupled_set_not_waiting()
408 static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled) in cpuidle_coupled_set_done() argument
410 cpuidle_coupled_set_not_waiting(cpu, coupled); in cpuidle_coupled_set_done()
411 atomic_sub(MAX_WAITING_CPUS, &coupled->ready_waiting_counts); in cpuidle_coupled_set_done()
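These are the write-side transitions paired with the predicates earlier in the listing: joining the waiting set records the CPU's requested state and returns the new waiting count (so the last arrival knows to poke the rest), leaving decrements it again, and the ready state is entered or left by adding or subtracting one MAX_WAITING_CPUS unit so it lands in the high half of the same word. A hedged model of just these updates; the kernel's memory barriers and cpumask-based pokes are left out, and the struct, NOT_IDLE value and function names are illustrative.

#include <stdatomic.h>

#define WAITING_BITS     16
#define MAX_WAITING_CPUS (1 << WAITING_BITS)
#define WAITING_MASK     (MAX_WAITING_CPUS - 1)
#define NOT_IDLE         (-1)   /* stand-in for CPUIDLE_COUPLED_NOT_IDLE */

struct coupled_model {
        atomic_int ready_waiting_counts;
        int requested_state[8];
        int online_count;
};

/* Join the waiting set; return the new waiting count so the caller can
 * detect that it was the last CPU to arrive. */
static int set_waiting(struct coupled_model *c, int cpu, int next_state)
{
        c->requested_state[cpu] = next_state;
        return (atomic_fetch_add(&c->ready_waiting_counts, 1) + 1) & WAITING_MASK;
}

/* Back out of the waiting set, e.g. on a pending reschedule. */
static void set_not_waiting(struct coupled_model *c, int cpu)
{
        atomic_fetch_sub(&c->ready_waiting_counts, 1);
        c->requested_state[cpu] = NOT_IDLE;
}

/* Claim to be ready: one unit lands in the high half of the counter. */
static void set_ready(struct coupled_model *c)
{
        atomic_fetch_add(&c->ready_waiting_counts, MAX_WAITING_CPUS);
}

/* Back out of ready, unless the whole set is already ready and waiting
 * (then the group is committed and must use the abort path instead).
 * Models the atomic_add_unless() in cpuidle_coupled_set_not_ready(). */
static int set_not_ready(struct coupled_model *c)
{
        int all = c->online_count | (c->online_count << WAITING_BITS);
        int old = atomic_load(&c->ready_waiting_counts);

        while (old != all)
                if (atomic_compare_exchange_weak(&c->ready_waiting_counts, &old,
                                                 old - MAX_WAITING_CPUS))
                        return 0;
        return -1;
}

/* Finished with the coupled state: leave both the ready and waiting counts. */
static void set_done(struct coupled_model *c, int cpu)
{
        set_not_waiting(c, cpu);
        atomic_fetch_sub(&c->ready_waiting_counts, MAX_WAITING_CPUS);
}

int main(void)
{
        struct coupled_model c = { .online_count = 1 };

        set_waiting(&c, 0, 2);          /* waiting count becomes 1 */
        set_ready(&c);                  /* ready count becomes 1: set is committed */
        (void)set_not_ready(&c);        /* refuses: everyone is ready and waiting */
        set_done(&c, 0);                /* counter drops back to 0 */
        return atomic_load(&c.ready_waiting_counts);
}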
440 static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled) in cpuidle_coupled_any_pokes_pending() argument
445 cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus); in cpuidle_coupled_any_pokes_pending()
474 struct cpuidle_coupled *coupled = dev->coupled; in cpuidle_enter_state_coupled() local
477 if (!coupled) in cpuidle_enter_state_coupled()
480 while (coupled->prevent) { in cpuidle_enter_state_coupled()
497 w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state); in cpuidle_enter_state_coupled()
505 if (w == coupled->online_count) { in cpuidle_enter_state_coupled()
507 cpuidle_coupled_poke_others(dev->cpu, coupled); in cpuidle_enter_state_coupled()
519 while (!cpuidle_coupled_cpus_waiting(coupled) || in cpuidle_enter_state_coupled()
525 cpuidle_coupled_set_not_waiting(dev->cpu, coupled); in cpuidle_enter_state_coupled()
529 if (coupled->prevent) { in cpuidle_enter_state_coupled()
530 cpuidle_coupled_set_not_waiting(dev->cpu, coupled); in cpuidle_enter_state_coupled()
541 cpuidle_coupled_set_not_waiting(dev->cpu, coupled); in cpuidle_enter_state_coupled()
560 cpuidle_coupled_set_ready(coupled); in cpuidle_enter_state_coupled()
561 while (!cpuidle_coupled_cpus_ready(coupled)) { in cpuidle_enter_state_coupled()
563 if (!cpuidle_coupled_cpus_waiting(coupled)) in cpuidle_enter_state_coupled()
564 if (!cpuidle_coupled_set_not_ready(coupled)) in cpuidle_enter_state_coupled()
585 if (cpuidle_coupled_any_pokes_pending(coupled)) { in cpuidle_enter_state_coupled()
586 cpuidle_coupled_set_done(dev->cpu, coupled); in cpuidle_enter_state_coupled()
588 cpuidle_coupled_parallel_barrier(dev, &coupled->abort_barrier); in cpuidle_enter_state_coupled()
593 next_state = cpuidle_coupled_get_state(dev, coupled); in cpuidle_enter_state_coupled()
597 cpuidle_coupled_set_done(dev->cpu, coupled); in cpuidle_enter_state_coupled()
621 while (!cpuidle_coupled_no_cpus_ready(coupled)) in cpuidle_enter_state_coupled()
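Read together, the lines from cpuidle_enter_state_coupled() form a two-stage handshake: every CPU first joins the waiting count (the last arrival pokes the rest), then each CPU moves to the ready count, and only when both counts equal online_count is the shared state resolved and entered; any doubt (a pending poke, need_resched, prevent) drops back out through set_not_waiting or the abort barrier. The sketch below is a hedged userspace model of the happy path only: pthreads stand in for CPUs, a pthread barrier stands in for the hardware completing the coupled state only after every CPU has entered it, and pokes, need_resched, prevent and the retry/abort paths are not modeled.

/* Model of the two-counter handshake in cpuidle_enter_state_coupled()
 * (illustrative only, build with -pthread). */
#include <limits.h>
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS            4
#define WAITING_BITS     16
#define MAX_WAITING_CPUS (1 << WAITING_BITS)
#define WAITING_MASK     (MAX_WAITING_CPUS - 1)

static atomic_int ready_waiting_counts;       /* ready << WAITING_BITS | waiting */
static int requested_state[NCPUS];
static pthread_barrier_t power_down;          /* power domain waits for every CPU */

static int waiting(void) { return atomic_load(&ready_waiting_counts) & WAITING_MASK; }
static int ready(void)   { return atomic_load(&ready_waiting_counts) >> WAITING_BITS; }

static void *cpu_idle(void *arg)
{
        int cpu = (int)(long)arg;

        /* 1. Publish the requested state and join the waiting count.  In the
         *    kernel the last CPU to arrive also pokes the others out of their
         *    per-CPU safe state. */
        requested_state[cpu] = cpu + 1;
        atomic_fetch_add(&ready_waiting_counts, 1);

        /* 2. Wait until every CPU in the set is waiting. */
        while (waiting() < NCPUS)
                sched_yield();

        /* 3. Move to the ready count and wait for the rest of the set.  (The
         *    kernel can still back out here via cpuidle_coupled_set_not_ready()
         *    or abort on a pending poke; neither is modeled.) */
        atomic_fetch_add(&ready_waiting_counts, MAX_WAITING_CPUS);
        while (ready() < NCPUS)
                sched_yield();

        /* 4. All CPUs have acked: resolve the shallowest requested state and
         *    "enter" it.  The barrier stands in for hardware that only powers
         *    down once every CPU has actually arrived. */
        int state = INT_MAX;
        for (int i = 0; i < NCPUS; i++)
                if (requested_state[i] < state)
                        state = requested_state[i];
        pthread_barrier_wait(&power_down);
        printf("cpu %d entered coupled state %d\n", cpu, state);

        /* 5. Done: leave both counts, then wait for the whole set to leave the
         *    ready state before returning, as the tail of
         *    cpuidle_enter_state_coupled() does. */
        atomic_fetch_sub(&ready_waiting_counts, MAX_WAITING_CPUS + 1);
        while (ready() > 0)
                sched_yield();

        return NULL;
}

int main(void)
{
        pthread_t t[NCPUS];

        pthread_barrier_init(&power_down, NULL, NCPUS);
        for (long i = 0; i < NCPUS; i++)
                pthread_create(&t[i], NULL, cpu_idle, (void *)i);
        for (int i = 0; i < NCPUS; i++)
                pthread_join(t[i], NULL);
        pthread_barrier_destroy(&power_down);
        return 0;
}

Every thread ends up printing the same resolved state (1 here), mirroring how each CPU in the set enters the state returned by cpuidle_coupled_get_state().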
627 static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled) in cpuidle_coupled_update_online_cpus() argument
630 cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus); in cpuidle_coupled_update_online_cpus()
631 coupled->online_count = cpumask_weight(&cpus); in cpuidle_coupled_update_online_cpus()
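online_count is simply the number of CPUs that are both in the coupled set and currently online, and it is the target both counters are compared against. A tiny sketch of the same computation over plain bitmasks; the function and mask parameters are assumptions standing in for the kernel's cpumask API.

#include <stdint.h>

/* Population count of (online & coupled): how many members of the coupled
 * set are currently online.  Stands in for the cpumask_and() +
 * cpumask_weight() pair above. */
static int coupled_online_count(uint64_t online_mask, uint64_t coupled_mask)
{
        uint64_t m = online_mask & coupled_mask;
        int count = 0;

        while (m) {
                m &= m - 1;     /* clear the lowest set bit */
                count++;
        }
        return count;
}

int main(void)
{
        /* CPUs 0-3 form the set, CPUs 0, 2 and 3 are online -> 3 */
        return coupled_online_count(0x0d, 0x0f) == 3 ? 0 : 1;
}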
647 struct cpuidle_coupled *coupled; in cpuidle_coupled_register_device() local
654 if (other_dev && other_dev->coupled) { in cpuidle_coupled_register_device()
655 coupled = other_dev->coupled; in cpuidle_coupled_register_device()
661 coupled = kzalloc(sizeof(struct cpuidle_coupled), GFP_KERNEL); in cpuidle_coupled_register_device()
662 if (!coupled) in cpuidle_coupled_register_device()
665 coupled->coupled_cpus = dev->coupled_cpus; in cpuidle_coupled_register_device()
668 dev->coupled = coupled; in cpuidle_coupled_register_device()
669 if (WARN_ON(!cpumask_equal(&dev->coupled_cpus, &coupled->coupled_cpus))) in cpuidle_coupled_register_device()
670 coupled->prevent++; in cpuidle_coupled_register_device()
672 cpuidle_coupled_update_online_cpus(coupled); in cpuidle_coupled_register_device()
674 coupled->refcnt++; in cpuidle_coupled_register_device()
692 struct cpuidle_coupled *coupled = dev->coupled; in cpuidle_coupled_unregister_device() local
697 if (--coupled->refcnt) in cpuidle_coupled_unregister_device()
698 kfree(coupled); in cpuidle_coupled_unregister_device()
699 dev->coupled = NULL; in cpuidle_coupled_unregister_device()
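Registration shares one struct cpuidle_coupled between all devices whose coupled_cpus masks match: the first CPU in the set allocates it, later CPUs find it through an already-registered sibling and take a reference, and a mismatched mask bumps prevent so the group never couples; unregistration drops the reference again. A rough model of that lookup-or-allocate pattern follows; the device array, uint64_t masks and function names are assumptions, and the kernel's cpuidle_lock and online-count refresh are omitted.

#include <stdint.h>
#include <stdlib.h>

struct coupled_set {
        uint64_t coupled_cpus;   /* mask of CPUs in the set */
        int refcnt;
        int prevent;
};

struct idle_dev {
        int cpu;
        uint64_t coupled_cpus;
        struct coupled_set *coupled;
};

/* Reuse a set already registered by another CPU in the same mask, otherwise
 * allocate a new one.  Mirrors the shape of cpuidle_coupled_register_device()
 * without the per-CPU device lookup or locking. */
static int coupled_register(struct idle_dev *dev,
                            struct idle_dev **all, int ndev)
{
        struct coupled_set *set = NULL;

        for (int i = 0; i < ndev; i++) {
                struct idle_dev *other = all[i];

                if (other && other != dev && other->coupled &&
                    (dev->coupled_cpus & (UINT64_C(1) << other->cpu))) {
                        set = other->coupled;
                        break;
                }
        }

        if (!set) {
                set = calloc(1, sizeof(*set));
                if (!set)
                        return -1;
                set->coupled_cpus = dev->coupled_cpus;
        }

        dev->coupled = set;
        if (dev->coupled_cpus != set->coupled_cpus)
                set->prevent++;          /* inconsistent masks: never couple */

        set->refcnt++;
        return 0;
}

int main(void)
{
        struct idle_dev d0 = { .cpu = 0, .coupled_cpus = 0x3 };
        struct idle_dev d1 = { .cpu = 1, .coupled_cpus = 0x3 };
        struct idle_dev *all[2] = { &d0, &d1 };

        coupled_register(&d0, all, 2);   /* allocates the set, refcnt = 1 */
        coupled_register(&d1, all, 2);   /* reuses d0's set, refcnt = 2 */
        return d0.coupled == d1.coupled ? 0 : 1;
}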
709 static void cpuidle_coupled_prevent_idle(struct cpuidle_coupled *coupled) in cpuidle_coupled_prevent_idle() argument
714 coupled->prevent++; in cpuidle_coupled_prevent_idle()
715 cpuidle_coupled_poke_others(cpu, coupled); in cpuidle_coupled_prevent_idle()
717 while (!cpuidle_coupled_no_cpus_waiting(coupled)) in cpuidle_coupled_prevent_idle()
728 static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled) in cpuidle_coupled_allow_idle() argument
737 coupled->prevent--; in cpuidle_coupled_allow_idle()
739 cpuidle_coupled_poke_others(cpu, coupled); in cpuidle_coupled_allow_idle()
750 if (dev && dev->coupled) { in coupled_cpu_online()
751 cpuidle_coupled_update_online_cpus(dev->coupled); in coupled_cpu_online()
752 cpuidle_coupled_allow_idle(dev->coupled); in coupled_cpu_online()
766 if (dev && dev->coupled) in coupled_cpu_up_prepare()
767 cpuidle_coupled_prevent_idle(dev->coupled); in coupled_cpu_up_prepare()
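Both hotplug callbacks lean on the prevent counter: before a CPU changes state the whole set is flushed out of coupled idle (prevent is raised, the other CPUs are poked, and the caller spins until nobody is waiting), and once the topology settles the online count is recomputed and prevent is dropped with another round of pokes so sleeping CPUs re-evaluate. Below is a small single-threaded model of that bracket; the kernel guards prevent with cpuidle_lock and sends real IPI pokes, both of which are reduced to comments, and the struct and helper names are illustrative.

#include <sched.h>
#include <stdatomic.h>

#define WAITING_MASK ((1 << 16) - 1)

struct coupled_model {
        atomic_int ready_waiting_counts;   /* low 16 bits: waiting CPUs */
        atomic_int prevent;
        int online_count;
};

/* Block coupled idle and wait for every CPU to back out of the waiting
 * state.  Models cpuidle_coupled_prevent_idle(); the kernel also pokes the
 * other CPUs here so they notice prevent and bail out promptly. */
static void prevent_idle(struct coupled_model *c)
{
        atomic_fetch_add(&c->prevent, 1);
        while (atomic_load(&c->ready_waiting_counts) & WAITING_MASK)
                sched_yield();
}

/* Re-enable coupled idle; the kernel pokes the other CPUs again so they
 * retry with the refreshed online count.  Models cpuidle_coupled_allow_idle(). */
static void allow_idle(struct coupled_model *c)
{
        atomic_fetch_sub(&c->prevent, 1);
}

/* Hotplug pairing, as in coupled_cpu_up_prepare() / coupled_cpu_online(). */
static void cpu_up_prepare(struct coupled_model *c)
{
        prevent_idle(c);                     /* flush the set out of coupled idle */
}

static void cpu_online(struct coupled_model *c, int new_online_count)
{
        c->online_count = new_online_count;  /* cpuidle_coupled_update_online_cpus() */
        allow_idle(c);
}

int main(void)
{
        struct coupled_model c = { .online_count = 2 };

        cpu_up_prepare(&c);   /* nobody is waiting in this single-threaded demo */
        cpu_online(&c, 3);
        return atomic_load(&c.prevent);      /* back to 0 */
}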