Lines Matching refs:cpu
206 void smp_muxed_ipi_set_data(int cpu, unsigned long data) in smp_muxed_ipi_set_data() argument
208 struct cpu_messages *info = &per_cpu(ipi_message, cpu); in smp_muxed_ipi_set_data()
213 void smp_muxed_ipi_message_pass(int cpu, int msg) in smp_muxed_ipi_message_pass() argument
215 struct cpu_messages *info = &per_cpu(ipi_message, cpu); in smp_muxed_ipi_message_pass()
227 smp_ops->cause_ipi(cpu, info->data); in smp_muxed_ipi_message_pass()
258 static inline void do_message_pass(int cpu, int msg) in do_message_pass() argument
261 smp_ops->message_pass(cpu, msg); in do_message_pass()
264 smp_muxed_ipi_message_pass(cpu, msg); in do_message_pass()
268 void smp_send_reschedule(int cpu) in smp_send_reschedule() argument
271 do_message_pass(cpu, PPC_MSG_RESCHEDULE); in smp_send_reschedule()
275 void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument
277 do_message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE); in arch_send_call_function_single_ipi()
282 unsigned int cpu; in arch_send_call_function_ipi_mask() local
284 for_each_cpu(cpu, mask) in arch_send_call_function_ipi_mask()
285 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION); in arch_send_call_function_ipi_mask()
291 int cpu; in smp_send_debugger_break() local
297 for_each_online_cpu(cpu) in smp_send_debugger_break()
298 if (cpu != me) in smp_send_debugger_break()
299 do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK); in smp_send_debugger_break()
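The entries above form the sender side of the IPI path: smp_send_reschedule(), arch_send_call_function_single_ipi(), arch_send_call_function_ipi_mask() and smp_send_debugger_break() all funnel into do_message_pass(), which either calls a platform message_pass hook or records the message for the target CPU and raises one hardware interrupt through smp_ops->cause_ipi(). A minimal userspace sketch of that multiplexing idea follows; cause_ipi(), the MSG_* names and the plain bitmask are illustrative stand-ins, not the kernel's struct cpu_messages or PPC_MSG_* definitions.

#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

enum { MSG_CALL_FUNCTION, MSG_RESCHEDULE, MSG_CALL_FUNC_SINGLE, MSG_DEBUGGER_BREAK, MSG_MAX };

/* One pending-message word per CPU; each bit stands for one message type. */
static atomic_uint ipi_message[NR_CPUS];

/* Hypothetical stand-in for smp_ops->cause_ipi(): raise one hardware IPI. */
static void cause_ipi(int cpu)
{
	printf("IPI -> cpu %d\n", cpu);
}

/* Sender: record which message is wanted, then kick the target once. */
static void muxed_ipi_message_pass(int cpu, int msg)
{
	atomic_fetch_or(&ipi_message[cpu], 1u << msg);
	cause_ipi(cpu);
}

/* Receiver: grab-and-clear everything pending, then act on each bit. */
static void muxed_ipi_demux(int cpu)
{
	unsigned int all = atomic_exchange(&ipi_message[cpu], 0);

	for (int msg = 0; msg < MSG_MAX; msg++)
		if (all & (1u << msg))
			printf("cpu %d: handling message %d\n", cpu, msg);
}

int main(void)
{
	muxed_ipi_message_pass(2, MSG_RESCHEDULE);
	muxed_ipi_message_pass(2, MSG_CALL_FUNCTION);
	muxed_ipi_demux(2);
	return 0;
}

Folding every message type into one pending word is what makes a single interrupt line enough: a burst of different messages to the same CPU costs at most one dispatch pass on the receiving side.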
342 unsigned int cpu; in smp_prepare_cpus() local
356 for_each_possible_cpu(cpu) { in smp_prepare_cpus()
357 zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), in smp_prepare_cpus()
358 GFP_KERNEL, cpu_to_node(cpu)); in smp_prepare_cpus()
359 zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), in smp_prepare_cpus()
360 GFP_KERNEL, cpu_to_node(cpu)); in smp_prepare_cpus()
388 unsigned int cpu = smp_processor_id(); in generic_cpu_disable() local
390 if (cpu == boot_cpuid) in generic_cpu_disable()
393 set_cpu_online(cpu, false); in generic_cpu_disable()
401 void generic_cpu_die(unsigned int cpu) in generic_cpu_die() argument
407 if (per_cpu(cpu_state, cpu) == CPU_DEAD) in generic_cpu_die()
411 printk(KERN_ERR "CPU%d didn't die...\n", cpu); in generic_cpu_die()
416 unsigned int cpu; in generic_mach_cpu_die() local
420 cpu = smp_processor_id(); in generic_mach_cpu_die()
421 printk(KERN_DEBUG "CPU%d offline\n", cpu); in generic_mach_cpu_die()
428 void generic_set_cpu_dead(unsigned int cpu) in generic_set_cpu_dead() argument
430 per_cpu(cpu_state, cpu) = CPU_DEAD; in generic_set_cpu_dead()
433 int generic_check_cpu_restart(unsigned int cpu) in generic_check_cpu_restart() argument
435 return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE; in generic_check_cpu_restart()
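generic_cpu_die() and its helpers implement the tear-down handshake: the CPU going away marks itself CPU_DEAD via generic_set_cpu_dead(), the surviving CPU polls that per-cpu state for a bounded time before complaining that the CPU "didn't die", and generic_check_cpu_restart() lets a dead-but-spinning CPU notice a request to come back up. A self-contained model of the same handshake, using an atomic array in place of the per-cpu cpu_state variable and made-up state names:

#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

enum { STATE_ONLINE, STATE_UP_PREPARE, STATE_DEAD };

static atomic_int cpu_state[8];     /* stand-in for the per-cpu cpu_state variable */

/* Called by the dying CPU itself, as generic_set_cpu_dead() is. */
static void set_cpu_dead(int cpu)
{
	atomic_store(&cpu_state[cpu], STATE_DEAD);
}

/* Called from another CPU, as generic_cpu_die() is: poll, then give up. */
static void wait_for_cpu_death(int cpu)
{
	for (int i = 0; i < 100; i++) {
		if (atomic_load(&cpu_state[cpu]) == STATE_DEAD)
			return;
		usleep(100 * 1000);     /* the kernel sleeps between checks here */
	}
	fprintf(stderr, "CPU%d didn't die...\n", cpu);
}

/* A dead CPU spins until someone asks it to come back, which the listing's
 * generic_check_cpu_restart() detects as CPU_UP_PREPARE. */
static int check_cpu_restart(int cpu)
{
	return atomic_load(&cpu_state[cpu]) == STATE_UP_PREPARE;
}

int main(void)
{
	set_cpu_dead(3);
	wait_for_cpu_death(3);
	printf("restart requested: %d\n", check_cpu_restart(3));
	return 0;
}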
443 int cpu; member
451 c_idle->idle = fork_idle(c_idle->cpu); in do_fork_idle()
455 static int __cpuinit create_idle(unsigned int cpu) in create_idle() argument
459 .cpu = cpu, in create_idle()
464 c_idle.idle = get_idle_for_cpu(cpu); in create_idle()
475 init_idle(c_idle.idle, cpu); in create_idle()
477 pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle)); in create_idle()
483 paca[cpu].__current = c_idle.idle; in create_idle()
484 paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD; in create_idle()
486 ti->cpu = cpu; in create_idle()
487 current_set[cpu] = ti; in create_idle()
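create_idle() prepares everything the new CPU needs before it runs any C code: an idle task from fork_idle(), and a paca entry whose kstack points just below the top of the idle thread's stack region, leaving room for one minimal stack frame. A toy calculation of that initial stack pointer, with made-up THREAD_SIZE / STACK_FRAME_OVERHEAD values and a pretend thread_info address rather than the kernel's real constants:

#include <stdio.h>

/* Illustrative values only; the real THREAD_SIZE and STACK_FRAME_OVERHEAD
 * are per-architecture constants. */
#define THREAD_SIZE          (16 * 1024)
#define STACK_FRAME_OVERHEAD 112

int main(void)
{
	unsigned long ti = 0x10000000ul;   /* pretend base of the idle thread's stack */
	unsigned long kstack = ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;

	printf("stack region %#lx..%#lx, initial SP %#lx\n",
	       ti, ti + THREAD_SIZE, kstack);
	return 0;
}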
492 int __cpuinit __cpu_up(unsigned int cpu) in __cpu_up() argument
497 (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) in __cpu_up()
501 rc = create_idle(cpu); in __cpu_up()
505 secondary_ti = current_set[cpu]; in __cpu_up()
510 cpu_callin_map[cpu] = 0; in __cpu_up()
519 DBG("smp: kicking cpu %d\n", cpu); in __cpu_up()
520 rc = smp_ops->kick_cpu(cpu); in __cpu_up()
522 pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc); in __cpu_up()
532 for (c = 50000; c && !cpu_callin_map[cpu]; c--) in __cpu_up()
540 for (c = 5000; c && !cpu_callin_map[cpu]; c--) in __cpu_up()
544 if (!cpu_callin_map[cpu]) { in __cpu_up()
545 printk(KERN_ERR "Processor %u is stuck.\n", cpu); in __cpu_up()
549 DBG("Processor %u found.\n", cpu); in __cpu_up()
555 while (!cpu_online(cpu)) in __cpu_up()
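__cpu_up() is the bring-up counterpart: it clears cpu_callin_map[cpu], kicks the CPU through smp_ops->kick_cpu(), waits with a bounded count for the secondary to set its callin flag (spinning early in boot, sleeping between checks once the scheduler runs), declares the processor "stuck" if it never does, and finally spins until the CPU appears online. A compact model of that two-stage handshake; callin and online below are stand-ins for cpu_callin_map[] and the online cpumask, not the kernel's variables. Build with -pthread.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int callin, online;

/* Plays the role of start_secondary() on the freshly started CPU. */
static void *secondary(void *arg)
{
	usleep(20 * 1000);              /* low-level bring-up */
	atomic_store(&callin, 1);       /* analogue of cpu_callin_map[cpu] = 1 */
	usleep(20 * 1000);              /* per-CPU setup, notifiers */
	atomic_store(&online, 1);       /* analogue of set_cpu_online(cpu, true) */
	return NULL;
}

int main(void)
{
	pthread_t t;

	atomic_store(&callin, 0);                   /* __cpu_up(): reset the flag first */
	pthread_create(&t, NULL, secondary, NULL);  /* stands in for smp_ops->kick_cpu() */

	/* Stage one: bounded wait for the callin, then give up. */
	int c;
	for (c = 5000; c && !atomic_load(&callin); c--)
		usleep(1000);
	if (!atomic_load(&callin)) {
		fprintf(stderr, "Processor is stuck.\n");
		return 1;
	}

	/* Stage two: wait until the new CPU marks itself online. */
	while (!atomic_load(&online))
		usleep(1000);
	printf("Processor found and online.\n");

	pthread_join(t, NULL);
	return 0;
}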
564 int cpu_to_core_id(int cpu) in cpu_to_core_id() argument
570 np = of_get_cpu_node(cpu, NULL); in cpu_to_core_id()
585 int cpu_core_index_of_thread(int cpu) in cpu_core_index_of_thread() argument
587 return cpu >> threads_shift; in cpu_core_index_of_thread()
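cpu_core_index_of_thread() relies on the SMT threads of one core being numbered consecutively: shifting the logical CPU number right by threads_shift gives the core index, and masking off the low bits (as cpu_first_thread_sibling() does elsewhere in this listing) gives the first thread of the same core. A small illustration, assuming four threads per core (threads_shift = 2), where CPUs 12-15 all land in core 3:

#include <stdio.h>

/* Illustrative only: assume 4 hardware threads per core (threads_shift = 2). */
#define THREADS_SHIFT    2
#define THREADS_PER_CORE (1 << THREADS_SHIFT)

static int core_index_of_thread(int cpu)  { return cpu >> THREADS_SHIFT; }
static int first_thread_sibling(int cpu)  { return cpu & ~(THREADS_PER_CORE - 1); }

int main(void)
{
	for (int cpu = 12; cpu < 16; cpu++)
		printf("cpu %2d -> core %d, first sibling %d\n",
		       cpu, core_index_of_thread(cpu), first_thread_sibling(cpu));
	return 0;
}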
600 static struct device_node *cpu_to_l2cache(int cpu) in cpu_to_l2cache() argument
605 if (!cpu_present(cpu)) in cpu_to_l2cache()
608 np = of_get_cpu_node(cpu, NULL); in cpu_to_l2cache()
622 unsigned int cpu = smp_processor_id(); in start_secondary() local
629 smp_store_cpu_info(cpu); in start_secondary()
632 cpu_callin_map[cpu] = 1; in start_secondary()
635 smp_ops->setup_cpu(cpu); in start_secondary()
646 notify_cpu_starting(cpu); in start_secondary()
647 set_cpu_online(cpu, true); in start_secondary()
649 base = cpu_first_thread_sibling(cpu); in start_secondary()
653 cpumask_set_cpu(cpu, cpu_sibling_mask(base + i)); in start_secondary()
654 cpumask_set_cpu(base + i, cpu_sibling_mask(cpu)); in start_secondary()
660 cpumask_set_cpu(cpu, cpu_core_mask(base + i)); in start_secondary()
661 cpumask_set_cpu(base + i, cpu_core_mask(cpu)); in start_secondary()
663 l2_cache = cpu_to_l2cache(cpu); in start_secondary()
669 cpumask_set_cpu(cpu, cpu_core_mask(i)); in start_secondary()
670 cpumask_set_cpu(i, cpu_core_mask(cpu)); in start_secondary()
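Once a secondary has called in, start_secondary() wires it into the topology maps: every hardware thread of the same core is marked as a sibling and a core-mate in both directions, and any CPU sharing the same L2 cache node (found via cpu_to_l2cache()) is added to the core mask as well; __cpu_disable(), listed next, clears exactly the same bits on the way down. A small model of that symmetric mask update, using plain bitmasks for the cpumasks and a made-up L2 table in place of the device-tree lookup:

#include <stdio.h>

#define NR_CPUS          8
#define THREADS_PER_CORE 2

/* Stand-ins for cpu_sibling_mask()/cpu_core_mask(); one bit per CPU. */
static unsigned long sibling_mask[NR_CPUS];
static unsigned long core_mask[NR_CPUS];

/* Hypothetical "which L2 cache is this CPU under" table, playing the role
 * of the cpu_to_l2cache() lookup in the listing. */
static int l2_id[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

static void link_cpu_into_topology(int cpu)
{
	int base = cpu & ~(THREADS_PER_CORE - 1);   /* cpu_first_thread_sibling() */

	/* Threads of the same core are siblings and core-mates, both ways. */
	for (int i = 0; i < THREADS_PER_CORE; i++) {
		sibling_mask[base + i] |= 1ul << cpu;
		sibling_mask[cpu]      |= 1ul << (base + i);
		core_mask[base + i]    |= 1ul << cpu;
		core_mask[cpu]         |= 1ul << (base + i);
	}

	/* CPUs that share our L2 cache join the core mask as well. */
	for (int i = 0; i < NR_CPUS; i++) {
		if (l2_id[i] == l2_id[cpu]) {
			core_mask[i]   |= 1ul << cpu;
			core_mask[cpu] |= 1ul << i;
		}
	}
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		link_cpu_into_topology(cpu);
	printf("cpu 5: siblings %#lx, core mask %#lx\n",
	       sibling_mask[5], core_mask[5]);
	return 0;
}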
728 int cpu = smp_processor_id(); in __cpu_disable() local
740 base = cpu_first_thread_sibling(cpu); in __cpu_disable()
742 cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i)); in __cpu_disable()
743 cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu)); in __cpu_disable()
744 cpumask_clear_cpu(cpu, cpu_core_mask(base + i)); in __cpu_disable()
745 cpumask_clear_cpu(base + i, cpu_core_mask(cpu)); in __cpu_disable()
748 l2_cache = cpu_to_l2cache(cpu); in __cpu_disable()
754 cpumask_clear_cpu(cpu, cpu_core_mask(i)); in __cpu_disable()
755 cpumask_clear_cpu(i, cpu_core_mask(cpu)); in __cpu_disable()
765 void __cpu_die(unsigned int cpu) in __cpu_die() argument
768 smp_ops->cpu_die(cpu); in __cpu_die()
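Most of the listing dispatches through smp_ops, the table of platform hooks: message_pass/cause_ipi for IPIs, kick_cpu to start a secondary, cpu_die to finish taking one down. A minimal sketch of that dispatch pattern; the struct layout and the demo_* functions here are illustrative, not the kernel's actual smp_ops_t.

#include <stdio.h>

struct smp_ops {
	void (*message_pass)(int cpu, int msg);   /* NULL: fall back to muxed IPIs */
	void (*cause_ipi)(int cpu, unsigned long data);
	int  (*kick_cpu)(int cpu);
	void (*cpu_die)(int cpu);
};

static void demo_cause_ipi(int cpu, unsigned long data) { printf("IPI cpu %d\n", cpu); }
static int  demo_kick_cpu(int cpu)                      { printf("kick cpu %d\n", cpu); return 0; }
static void demo_cpu_die(int cpu)                       { printf("cpu %d dies\n", cpu); }

/* A platform fills in the hooks it supports; generic code only sees the table. */
static struct smp_ops demo_ops = {
	.message_pass = NULL,
	.cause_ipi    = demo_cause_ipi,
	.kick_cpu     = demo_kick_cpu,
	.cpu_die      = demo_cpu_die,
};

static struct smp_ops *smp_ops = &demo_ops;

int main(void)
{
	if (smp_ops->kick_cpu(1) == 0)      /* __cpu_up() style */
		smp_ops->cause_ipi(1, 0);
	smp_ops->cpu_die(1);                /* __cpu_die() style */
	return 0;
}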