Lines matching refs:cpu (identifier cross-reference for arch/powerpc/kernel/smp.c)

286 void smp_muxed_ipi_set_message(int cpu, int msg)  in smp_muxed_ipi_set_message()  argument
288 struct cpu_messages *info = &per_cpu(ipi_message, cpu); in smp_muxed_ipi_set_message()
298 void smp_muxed_ipi_message_pass(int cpu, int msg) in smp_muxed_ipi_message_pass() argument
300 smp_muxed_ipi_set_message(cpu, msg); in smp_muxed_ipi_message_pass()
306 smp_ops->cause_ipi(cpu); in smp_muxed_ipi_message_pass()
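
The three lines above are the sender side of powerpc's muxed IPI scheme: only one doorbell interrupt is available per CPU, so message types are encoded in a per-CPU word that the receiver demultiplexes. Below is a minimal userspace model in C11; all names (ipi_message, cause_ipi, ipi_demux) are illustrative stand-ins, and a bitmask replaces the kernel's one-byte-per-message encoding inside the long.

    /* Userspace model of the muxed-IPI sender/receiver pair; names are
     * illustrative, not the kernel's API. */
    #include <stdatomic.h>
    #include <stdio.h>

    #define NR_CPUS 4
    enum { MSG_CALL_FUNCTION, MSG_RESCHEDULE, MSG_TICK_BROADCAST, MSG_NMI_IPI };

    static _Atomic unsigned long ipi_message[NR_CPUS];

    static void cause_ipi(int cpu)            /* smp_ops->cause_ipi() stand-in */
    {
        printf("doorbell -> cpu %d\n", cpu);
    }

    static void muxed_ipi_set_message(int cpu, int msg)
    {
        /* Release ordering stands in for the kernel's barrier: the message
         * must be visible before the doorbell interrupt arrives. */
        atomic_fetch_or_explicit(&ipi_message[cpu], 1UL << msg,
                                 memory_order_release);
    }

    static void muxed_ipi_message_pass(int cpu, int msg)
    {
        muxed_ipi_set_message(cpu, msg);
        cause_ipi(cpu);                       /* one interrupt, many messages */
    }

    static void ipi_demux(int cpu)            /* receiver side */
    {
        unsigned long all = atomic_exchange_explicit(&ipi_message[cpu], 0,
                                                     memory_order_acquire);
        for (int msg = 0; msg < 4; msg++)
            if (all & (1UL << msg))
                printf("cpu %d handles msg %d\n", cpu, msg);
    }

    int main(void)
    {
        muxed_ipi_message_pass(1, MSG_RESCHEDULE);
        muxed_ipi_message_pass(1, MSG_CALL_FUNCTION);
        ipi_demux(1);
        return 0;
    }
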
360 static inline void do_message_pass(int cpu, int msg) in do_message_pass() argument
363 smp_ops->message_pass(cpu, msg); in do_message_pass()
366 smp_muxed_ipi_message_pass(cpu, msg); in do_message_pass()
370 void arch_smp_send_reschedule(int cpu) in arch_smp_send_reschedule() argument
373 do_message_pass(cpu, PPC_MSG_RESCHEDULE); in arch_smp_send_reschedule()
377 void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument
379 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION); in arch_send_call_function_single_ipi()
384 unsigned int cpu; in arch_send_call_function_ipi_mask() local
386 for_each_cpu(cpu, mask) in arch_send_call_function_ipi_mask()
387 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION); in arch_send_call_function_ipi_mask()
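
do_message_pass() (lines 360-366 above) is the funnel for all of these senders: a platform may install its own message_pass hook in smp_ops, and only in its absence does the generic muxed path run. A compilable userspace sketch of that dispatch, with the structures reduced to the single field the example needs:

    /* Sketch of the dispatch visible in do_message_pass(); names mirror
     * the kernel's, the structs are cut down for the example. */
    #include <stdio.h>

    enum { PPC_MSG_CALL_FUNCTION, PPC_MSG_RESCHEDULE,
           PPC_MSG_TICK_BROADCAST, PPC_MSG_NMI_IPI };

    struct smp_ops_t {
        void (*message_pass)(int cpu, int msg);   /* optional platform hook */
    };

    static void muxed_ipi_message_pass(int cpu, int msg)
    {
        printf("muxed path: cpu %d msg %d\n", cpu, msg);
    }

    static struct smp_ops_t ops = { .message_pass = NULL };
    static struct smp_ops_t *smp_ops = &ops;

    static void do_message_pass(int cpu, int msg)
    {
        if (smp_ops->message_pass)
            smp_ops->message_pass(cpu, msg);      /* platform-specific path */
        else
            muxed_ipi_message_pass(cpu, msg);     /* generic muxed fallback */
    }

    int main(void)
    {
        /* arch_smp_send_reschedule() boils down to this call: */
        do_message_pass(1, PPC_MSG_RESCHEDULE);
        return 0;
    }
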
478 static void do_smp_send_nmi_ipi(int cpu, bool safe) in do_smp_send_nmi_ipi() argument
480 if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu)) in do_smp_send_nmi_ipi()
483 if (cpu >= 0) { in do_smp_send_nmi_ipi()
484 do_message_pass(cpu, PPC_MSG_NMI_IPI); in do_smp_send_nmi_ipi()
502 static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), in __smp_send_nmi_ipi() argument
509 BUG_ON(cpu == me); in __smp_send_nmi_ipi()
510 BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS); in __smp_send_nmi_ipi()
526 if (cpu < 0) { in __smp_send_nmi_ipi()
531 cpumask_set_cpu(cpu, &nmi_ipi_pending_mask); in __smp_send_nmi_ipi()
538 do_smp_send_nmi_ipi(cpu, safe); in __smp_send_nmi_ipi()
567 int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us) in smp_send_nmi_ipi() argument
569 return __smp_send_nmi_ipi(cpu, fn, delay_us, false); in smp_send_nmi_ipi()
572 int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us) in smp_send_safe_nmi_ipi() argument
574 return __smp_send_nmi_ipi(cpu, fn, delay_us, true); in smp_send_safe_nmi_ipi()
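
smp_send_nmi_ipi() and smp_send_safe_nmi_ipi() deliver fn to one CPU (or, with NMI_IPI_ALL_OTHERS, to every other CPU) at NMI level, waiting up to delay_us microseconds for the target to start the handler; the safe variant skips the optional cause_nmi_ipi fast path. A hypothetical in-kernel caller might look like this (powerpc context assumed; dump_regs_nmi and poke_stuck_cpu are invented names):

    /* Hypothetical caller of the NMI IPI API above. The callback runs at
     * NMI level on the target, so it must not sleep or take ordinary
     * locks; delay_us bounds the wait for acknowledgement. */
    #include <linux/smp.h>
    #include <linux/printk.h>
    #include <linux/ptrace.h>

    static void dump_regs_nmi(struct pt_regs *regs)
    {
        pr_emerg("cpu %d: NIP %lx\n", raw_smp_processor_id(), regs->nip);
    }

    static void poke_stuck_cpu(int cpu)
    {
        /* Returns nonzero once the target begins the handler; 0 on timeout. */
        if (!smp_send_nmi_ipi(cpu, dump_regs_nmi, 1000000))
            pr_err("cpu %d did not take the NMI IPI\n", cpu);
    }
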
581 unsigned int cpu; in tick_broadcast() local
583 for_each_cpu(cpu, mask) in tick_broadcast()
584 do_message_pass(cpu, PPC_MSG_TICK_BROADCAST); in tick_broadcast()
603 int cpu; in crash_send_ipi() local
607 for_each_present_cpu(cpu) { in crash_send_ipi()
608 if (cpu_online(cpu)) in crash_send_ipi()
619 do_smp_send_nmi_ipi(cpu, false); in crash_send_ipi()
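
crash_send_ipi() combines both delivery modes: one NMI_IPI_ALL_OTHERS broadcast reaches every online CPU, then present-but-offline CPUs are poked individually with do_smp_send_nmi_ipi() (in the kernel this offline walk is additionally gated on kdump state). The fan-out logic, modelled as a plain C program with hardcoded predicates:

    /* Logic-only model of the crash fan-out; the predicates are hardcoded
     * and the kdump gating is omitted. */
    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 8

    static bool cpu_online(int cpu)  { return cpu < 6; }   /* 6 and 7 offline */
    static bool cpu_present(int cpu) { (void)cpu; return true; }

    int main(void)
    {
        puts("NMI broadcast to all other online cpus");    /* NMI_IPI_ALL_OTHERS */
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            if (!cpu_present(cpu) || cpu_online(cpu))
                continue;
            printf("direct NMI to offline cpu %d\n", cpu); /* do_smp_send_nmi_ipi */
        }
        return 0;
    }
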
875 static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg) in get_cpu_thread_group_start() argument
877 int hw_cpu_id = get_hard_smp_processor_id(cpu); in get_cpu_thread_group_start()
894 static struct thread_groups *__init get_thread_groups(int cpu, in get_thread_groups() argument
898 struct device_node *dn = of_get_cpu_node(cpu, NULL); in get_thread_groups()
899 struct thread_groups_list *cpu_tgl = &tgl[cpu]; in get_thread_groups()
930 int cpu, int cpu_group_start) in update_mask_from_threadgroup() argument
932 int first_thread = cpu_first_thread_sibling(cpu); in update_mask_from_threadgroup()
935 zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu)); in update_mask_from_threadgroup()
952 static int __init init_thread_group_cache_map(int cpu, int cache_property) in init_thread_group_cache_map() argument
963 tg = get_thread_groups(cpu, cache_property, &err); in init_thread_group_cache_map()
968 cpu_group_start = get_cpu_thread_group_start(cpu, tg); in init_thread_group_cache_map()
976 mask = &per_cpu(thread_group_l1_cache_map, cpu); in init_thread_group_cache_map()
977 update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start); in init_thread_group_cache_map()
980 mask = &per_cpu(thread_group_l2_cache_map, cpu); in init_thread_group_cache_map()
981 update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start); in init_thread_group_cache_map()
982 mask = &per_cpu(thread_group_l3_cache_map, cpu); in init_thread_group_cache_map()
983 update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start); in init_thread_group_cache_map()
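
Lines 875-983 parse the ibm,thread-groups device-tree property: for a given cache level (THREAD_GROUP_SHARE_L1 or THREAD_GROUP_SHARE_L2_L3), get_cpu_thread_group_start() locates the group containing a CPU's hardware thread id, and update_mask_from_threadgroup() then fills the matching per-CPU L1/L2/L3 mask. A userspace model of the group search, with the struct simplified from the kernel's thread_groups:

    /* Model of get_cpu_thread_group_start(): given parsed thread-group
     * data, find the first hardware thread of the group containing
     * hw_cpu. Field layout is simplified from the kernel's struct. */
    #include <stdio.h>

    #define MAX_GROUPS  2
    #define MAX_THREADS 4

    struct thread_groups {
        int nr_groups;
        int threads_per_group;
        int thread_list[MAX_GROUPS * MAX_THREADS];
    };

    static int thread_group_start(int hw_cpu, const struct thread_groups *tg)
    {
        for (int g = 0; g < tg->nr_groups; g++) {
            int base = g * tg->threads_per_group;
            for (int t = 0; t < tg->threads_per_group; t++)
                if (tg->thread_list[base + t] == hw_cpu)
                    return tg->thread_list[base];  /* group's first thread */
        }
        return -1;   /* hw_cpu not described by this property */
    }

    int main(void)
    {
        /* Two groups of four hardware threads, interleaved. */
        struct thread_groups tg = {
            .nr_groups = 2, .threads_per_group = 4,
            .thread_list = { 0, 2, 4, 6,  1, 3, 5, 7 },
        };
        printf("group start of hw thread 5: %d\n",
               thread_group_start(5, &tg));        /* prints 1 */
        return 0;
    }
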
1021 static const struct cpumask *shared_cache_mask(int cpu) in shared_cache_mask() argument
1023 return per_cpu(cpu_l2_cache_map, cpu); in shared_cache_mask()
1027 static const struct cpumask *smallcore_smt_mask(int cpu) in smallcore_smt_mask() argument
1029 return cpu_smallcore_mask(cpu); in smallcore_smt_mask()
1033 static struct cpumask *cpu_coregroup_mask(int cpu) in cpu_coregroup_mask() argument
1035 return per_cpu(cpu_coregroup_map, cpu); in cpu_coregroup_mask()
1043 static const struct cpumask *cpu_mc_mask(int cpu) in cpu_mc_mask() argument
1045 return cpu_coregroup_mask(cpu); in cpu_mc_mask()
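
These four accessors exist to be plugged into the scheduler's topology table, where each level names the cpumask function that defines it. The sketch below compresses that table; field order, the flags callbacks, and the level names vary across kernel versions, so treat it as illustrative rather than the exact powerpc_topology[] definition:

    /* Illustrative scheduler topology table using the accessors above;
     * not the verbatim powerpc definition. */
    static struct sched_domain_topology_level topology_sketch[] = {
        { smallcore_smt_mask, SD_INIT_NAME(SMT)   },  /* SMT siblings     */
        { shared_cache_mask,  SD_INIT_NAME(CACHE) },  /* L2 sharers       */
        { cpu_mc_mask,        SD_INIT_NAME(MC)    },  /* coregroup        */
        { cpu_cpu_mask,       SD_INIT_NAME(PKG)   },  /* rest of the node */
        { NULL, },
    };
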
1060 int cpu; in init_big_cores() local
1062 for_each_possible_cpu(cpu) { in init_big_cores()
1063 int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1); in init_big_cores()
1068 zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu), in init_big_cores()
1070 cpu_to_node(cpu)); in init_big_cores()
1075 for_each_possible_cpu(cpu) { in init_big_cores()
1076 int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2_L3); in init_big_cores()
1091 unsigned int cpu, num_threads; in smp_prepare_cpus() local
1105 for_each_possible_cpu(cpu) { in smp_prepare_cpus()
1106 zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), in smp_prepare_cpus()
1107 GFP_KERNEL, cpu_to_node(cpu)); in smp_prepare_cpus()
1108 zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu), in smp_prepare_cpus()
1109 GFP_KERNEL, cpu_to_node(cpu)); in smp_prepare_cpus()
1110 zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), in smp_prepare_cpus()
1111 GFP_KERNEL, cpu_to_node(cpu)); in smp_prepare_cpus()
1113 zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu), in smp_prepare_cpus()
1114 GFP_KERNEL, cpu_to_node(cpu)); in smp_prepare_cpus()
1120 if (cpu_present(cpu)) { in smp_prepare_cpus()
1121 set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]); in smp_prepare_cpus()
1122 set_cpu_numa_mem(cpu, in smp_prepare_cpus()
1123 local_memory_node(numa_cpu_lookup_table[cpu])); in smp_prepare_cpus()
1180 unsigned int cpu = smp_processor_id(); in generic_cpu_disable() local
1182 if (cpu == boot_cpuid) in generic_cpu_disable()
1185 set_cpu_online(cpu, false); in generic_cpu_disable()
1207 void generic_cpu_die(unsigned int cpu) in generic_cpu_die() argument
1213 if (is_cpu_dead(cpu)) in generic_cpu_die()
1217 printk(KERN_ERR "CPU%d didn't die...\n", cpu); in generic_cpu_die()
1220 void generic_set_cpu_dead(unsigned int cpu) in generic_set_cpu_dead() argument
1222 per_cpu(cpu_state, cpu) = CPU_DEAD; in generic_set_cpu_dead()
1230 void generic_set_cpu_up(unsigned int cpu) in generic_set_cpu_up() argument
1232 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; in generic_set_cpu_up()
1235 int generic_check_cpu_restart(unsigned int cpu) in generic_check_cpu_restart() argument
1237 return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE; in generic_check_cpu_restart()
1240 int is_cpu_dead(unsigned int cpu) in is_cpu_dead() argument
1242 return per_cpu(cpu_state, cpu) == CPU_DEAD; in is_cpu_dead()
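
generic_set_cpu_dead() and generic_cpu_die() form a two-CPU handshake: the dying CPU stores CPU_DEAD into its per-cpu cpu_state slot, and the CPU driving the offline operation polls for that value with a bounded wait. A runnable userspace model using a C11 atomic and a thread per side (state values and the retry count are illustrative):

    /* Userspace model of the cpu_state handshake; build with -lpthread. */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <pthread.h>
    #include <unistd.h>

    enum { CPU_UP_PREPARE = 1, CPU_ONLINE, CPU_DEAD };

    static _Atomic int cpu_state = CPU_ONLINE;

    static void *dying_cpu(void *arg)
    {
        (void)arg;
        usleep(1000);                        /* pretend to tear down state */
        atomic_store(&cpu_state, CPU_DEAD);  /* generic_set_cpu_dead() */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, dying_cpu, NULL);

        /* generic_cpu_die(): poll for CPU_DEAD with a bounded wait */
        for (int i = 0; i < 100; i++) {
            if (atomic_load(&cpu_state) == CPU_DEAD) {
                puts("cpu is dead, power can be cut");
                pthread_join(t, NULL);
                return 0;
            }
            usleep(100);
        }
        fprintf(stderr, "CPU didn't die...\n");
        pthread_join(t, NULL);
        return 1;
    }
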
1256 static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle) in cpu_idle_thread_init() argument
1259 paca_ptrs[cpu]->__current = idle; in cpu_idle_thread_init()
1260 paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) + in cpu_idle_thread_init()
1263 task_thread_info(idle)->cpu = cpu; in cpu_idle_thread_init()
1264 secondary_current = current_set[cpu] = idle; in cpu_idle_thread_init()
1267 int __cpu_up(unsigned int cpu, struct task_struct *tidle) in __cpu_up() argument
1280 cpu_thread_in_subcore(cpu)) in __cpu_up()
1284 (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) in __cpu_up()
1287 cpu_idle_thread_init(cpu, tidle); in __cpu_up()
1294 rc = smp_ops->prepare_cpu(cpu); in __cpu_up()
1302 cpu_callin_map[cpu] = 0; in __cpu_up()
1311 DBG("smp: kicking cpu %d\n", cpu); in __cpu_up()
1312 rc = smp_ops->kick_cpu(cpu); in __cpu_up()
1314 pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc); in __cpu_up()
1326 spin_until_cond(cpu_callin_map[cpu] || time_is_before_jiffies(deadline)); in __cpu_up()
1328 if (!cpu_callin_map[cpu] && system_state >= SYSTEM_RUNNING) { in __cpu_up()
1333 while (!cpu_callin_map[cpu] && time_is_after_jiffies(deadline)) in __cpu_up()
1337 if (!cpu_callin_map[cpu]) { in __cpu_up()
1338 printk(KERN_ERR "Processor %u is stuck.\n", cpu); in __cpu_up()
1342 DBG("Processor %u found.\n", cpu); in __cpu_up()
1348 spin_until_cond(cpu_online(cpu)); in __cpu_up()
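
__cpu_up() is the mirror-image handshake for onlining: after kick_cpu(), the boot CPU spins until the secondary sets its cpu_callin_map slot or a deadline passes, then waits again for cpu_online(). A userspace model of the callin wait with an explicit timeout (names mirror the kernel's; the timing is arbitrary):

    /* Userspace model of the __cpu_up() callin wait; build with -lpthread. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>
    #include <pthread.h>

    #define NR_CPUS 4
    static _Atomic int cpu_callin_map[NR_CPUS];

    static void *secondary(void *arg)
    {
        int cpu = *(int *)arg;
        /* start_secondary(): announce ourselves to the boot CPU */
        atomic_store(&cpu_callin_map[cpu], 1);
        return NULL;
    }

    static bool wait_for_callin(int cpu, int timeout_ms)
    {
        struct timespec t0, now;
        clock_gettime(CLOCK_MONOTONIC, &t0);
        for (;;) {
            if (atomic_load(&cpu_callin_map[cpu]))
                return true;
            clock_gettime(CLOCK_MONOTONIC, &now);
            long ms = (now.tv_sec - t0.tv_sec) * 1000 +
                      (now.tv_nsec - t0.tv_nsec) / 1000000;
            if (ms > timeout_ms)
                return false;        /* "Processor %u is stuck." */
        }
    }

    int main(void)
    {
        int cpu = 1;
        pthread_t t;
        pthread_create(&t, NULL, secondary, &cpu);    /* kick_cpu() */
        printf("cpu %d %s\n", cpu,
               wait_for_callin(cpu, 1000) ? "found" : "stuck");
        pthread_join(t, NULL);
        return 0;
    }
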
1356 int cpu_to_core_id(int cpu) in cpu_to_core_id() argument
1361 np = of_get_cpu_node(cpu, NULL); in cpu_to_core_id()
1373 int cpu_core_index_of_thread(int cpu) in cpu_core_index_of_thread() argument
1375 return cpu >> threads_shift; in cpu_core_index_of_thread()
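
cpu_core_index_of_thread() is pure arithmetic: logical CPU numbers are dense within a core, so shifting by threads_shift (log2 of threads per core) yields the core index. For example, with 8 threads per core:

    /* The thread-to-core arithmetic: threads_shift = log2(threads_per_core). */
    #include <stdio.h>

    int main(void)
    {
        int threads_shift = 3;               /* 8 threads per core */
        for (int cpu = 0; cpu < 17; cpu += 8)
            printf("cpu %2d -> core %d\n", cpu, cpu >> threads_shift);
        return 0;                            /* prints cores 0, 1, 2 */
    }
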
1388 static struct device_node *cpu_to_l2cache(int cpu) in cpu_to_l2cache() argument
1393 if (!cpu_present(cpu)) in cpu_to_l2cache()
1396 np = of_get_cpu_node(cpu, NULL); in cpu_to_l2cache()
1407 static bool update_mask_by_l2(int cpu, cpumask_var_t *mask) in update_mask_by_l2() argument
1421 cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu)); in update_mask_by_l2()
1423 for_each_cpu(i, per_cpu(thread_group_l2_cache_map, cpu)) { in update_mask_by_l2()
1425 set_cpus_related(i, cpu, cpu_l2_cache_mask); in update_mask_by_l2()
1429 if (!cpumask_equal(submask_fn(cpu), cpu_l2_cache_mask(cpu)) && in update_mask_by_l2()
1430 !cpumask_subset(submask_fn(cpu), cpu_l2_cache_mask(cpu))) { in update_mask_by_l2()
1432 cpu); in update_mask_by_l2()
1438 l2_cache = cpu_to_l2cache(cpu); in update_mask_by_l2()
1441 for_each_cpu(i, cpu_sibling_mask(cpu)) in update_mask_by_l2()
1442 set_cpus_related(cpu, i, cpu_l2_cache_mask); in update_mask_by_l2()
1447 cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu)); in update_mask_by_l2()
1450 or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask); in update_mask_by_l2()
1453 cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu)); in update_mask_by_l2()
1464 or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask); in update_mask_by_l2()
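
update_mask_by_l2() builds symmetric relations: set_cpus_related(i, j, mask_fn) marks each CPU in the other's mask, so membership in cpu_l2_cache_mask is always mutual. A userspace model with cpu_set_t standing in for the kernel's cpumask (the sharing topology is hardcoded):

    /* Model of the set_cpus_related() pattern: relating two CPUs sets
     * each one in the other's mask, keeping the relation symmetric. */
    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    #define NR_CPUS 8
    static cpu_set_t l2_cache_mask[NR_CPUS];

    static void set_cpus_related(int i, int j, cpu_set_t *masks)
    {
        CPU_SET(j, &masks[i]);
        CPU_SET(i, &masks[j]);
    }

    int main(void)
    {
        /* CPUs 0-3 share an L2: relate each pair once. */
        for (int i = 0; i < 4; i++)
            for (int j = i; j < 4; j++)
                set_cpus_related(i, j, l2_cache_mask);

        for (int c = 0; c < 4; c++)
            printf("cpu %d shares L2 with %d cpus\n", c,
                   CPU_COUNT(&l2_cache_mask[c]));
        return 0;
    }
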
1478 static void remove_cpu_from_masks(int cpu) in remove_cpu_from_masks() argument
1483 unmap_cpu_from_node(cpu); in remove_cpu_from_masks()
1488 for_each_cpu(i, mask_fn(cpu)) { in remove_cpu_from_masks()
1489 set_cpus_unrelated(cpu, i, cpu_l2_cache_mask); in remove_cpu_from_masks()
1490 set_cpus_unrelated(cpu, i, cpu_sibling_mask); in remove_cpu_from_masks()
1492 set_cpus_unrelated(cpu, i, cpu_smallcore_mask); in remove_cpu_from_masks()
1495 for_each_cpu(i, cpu_core_mask(cpu)) in remove_cpu_from_masks()
1496 set_cpus_unrelated(cpu, i, cpu_core_mask); in remove_cpu_from_masks()
1499 for_each_cpu(i, cpu_coregroup_mask(cpu)) in remove_cpu_from_masks()
1500 set_cpus_unrelated(cpu, i, cpu_coregroup_mask); in remove_cpu_from_masks()
1505 static inline void add_cpu_to_smallcore_masks(int cpu) in add_cpu_to_smallcore_masks() argument
1512 cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu)); in add_cpu_to_smallcore_masks()
1514 for_each_cpu(i, per_cpu(thread_group_l1_cache_map, cpu)) { in add_cpu_to_smallcore_masks()
1516 set_cpus_related(i, cpu, cpu_smallcore_mask); in add_cpu_to_smallcore_masks()
1520 static void update_coregroup_mask(int cpu, cpumask_var_t *mask) in update_coregroup_mask() argument
1523 int coregroup_id = cpu_to_coregroup_id(cpu); in update_coregroup_mask()
1531 for_each_cpu(i, submask_fn(cpu)) in update_coregroup_mask()
1532 set_cpus_related(cpu, i, cpu_coregroup_mask); in update_coregroup_mask()
1537 cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu)); in update_coregroup_mask()
1540 or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask); in update_coregroup_mask()
1543 cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu)); in update_coregroup_mask()
1548 or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask); in update_coregroup_mask()
1556 static void add_cpu_to_masks(int cpu) in add_cpu_to_masks() argument
1559 int first_thread = cpu_first_thread_sibling(cpu); in add_cpu_to_masks()
1569 map_cpu_to_node(cpu, cpu_to_node(cpu)); in add_cpu_to_masks()
1570 cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); in add_cpu_to_masks()
1571 cpumask_set_cpu(cpu, cpu_core_mask(cpu)); in add_cpu_to_masks()
1575 set_cpus_related(i, cpu, cpu_sibling_mask); in add_cpu_to_masks()
1577 add_cpu_to_smallcore_masks(cpu); in add_cpu_to_masks()
1580 ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu)); in add_cpu_to_masks()
1581 update_mask_by_l2(cpu, &mask); in add_cpu_to_masks()
1584 update_coregroup_mask(cpu, &mask); in add_cpu_to_masks()
1587 chip_id = cpu_to_chip_id(cpu); in add_cpu_to_masks()
1593 or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask); in add_cpu_to_masks()
1596 cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu)); in add_cpu_to_masks()
1600 cpumask_and(mask, mask, cpu_cpu_mask(cpu)); in add_cpu_to_masks()
1604 or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask); in add_cpu_to_masks()
1618 unsigned int cpu = raw_smp_processor_id(); in start_secondary() local
1627 cpumask_set_cpu(cpu, mm_cpumask(&init_mm)); in start_secondary()
1630 smp_store_cpu_info(cpu); in start_secondary()
1632 rcu_cpu_starting(cpu); in start_secondary()
1633 cpu_callin_map[cpu] = 1; in start_secondary()
1636 smp_ops->setup_cpu(cpu); in start_secondary()
1648 set_numa_node(numa_cpu_lookup_table[cpu]); in start_secondary()
1649 set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu])); in start_secondary()
1652 add_cpu_to_masks(cpu); in start_secondary()
1660 struct cpumask *mask = cpu_l2_cache_mask(cpu); in start_secondary()
1665 if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu))) in start_secondary()
1670 notify_cpu_starting(cpu); in start_secondary()
1671 set_cpu_online(cpu, true); in start_secondary()
1744 int cpu = smp_processor_id(); in __cpu_disable() local
1757 remove_cpu_from_masks(cpu); in __cpu_disable()
1762 void __cpu_die(unsigned int cpu) in __cpu_die() argument
1768 VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(&init_mm))); in __cpu_die()
1770 cpumask_clear_cpu(cpu, mm_cpumask(&init_mm)); in __cpu_die()
1773 smp_ops->cpu_die(cpu); in __cpu_die()