/linux-3.4.99/arch/arm/mach-integrator/ |
D | cpu.c | 86 cpumask_t cpus_allowed; in integrator_set_target() local 95 cpus_allowed = current->cpus_allowed; in integrator_set_target() 129 set_cpus_allowed(current, cpus_allowed); in integrator_set_target() 152 set_cpus_allowed(current, cpus_allowed); in integrator_set_target() 161 cpumask_t cpus_allowed; in integrator_get() local 166 cpus_allowed = current->cpus_allowed; in integrator_get() 184 set_cpus_allowed(current, cpus_allowed); in integrator_get()
|
/linux-3.4.99/kernel/ |
D | cpuset.c | 95 cpumask_var_t cpus_allowed; /* CPUs allowed to tasks in cpuset */ member 285 while (cs && !cpumask_intersects(cs->cpus_allowed, cpu_online_mask)) in guarantee_online_cpus() 288 cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask); in guarantee_online_cpus() 348 return cpumask_subset(p->cpus_allowed, q->cpus_allowed) && in is_cpuset_subset() 366 if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) { in alloc_trial_cpuset() 370 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); in alloc_trial_cpuset() 381 free_cpumask_var(trial->cpus_allowed); in free_trial_cpuset() 434 cpumask_intersects(trial->cpus_allowed, c->cpus_allowed)) in validate_change() 444 if (cpumask_empty(trial->cpus_allowed) || in validate_change() 460 return cpumask_intersects(a->cpus_allowed, b->cpus_allowed); in cpusets_overlap() [all …]
|
D | rcutree_plugin.h | 1594 !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) || in rcu_cpu_kthread_should_stop() 1602 if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu))) in rcu_cpu_kthread_should_stop()
|
/linux-3.4.99/arch/mips/kernel/ |
D | mips-mt-fpaff.c | 65 cpumask_var_t cpus_allowed, new_mask, effective_mask; in mipsmt_sys_sched_setaffinity() local 90 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { in mipsmt_sys_sched_setaffinity() 127 cpuset_cpus_allowed(p, cpus_allowed); in mipsmt_sys_sched_setaffinity() 128 if (!cpumask_subset(effective_mask, cpus_allowed)) { in mipsmt_sys_sched_setaffinity() 134 cpumask_copy(new_mask, cpus_allowed); in mipsmt_sys_sched_setaffinity() 143 free_cpumask_var(cpus_allowed); in mipsmt_sys_sched_setaffinity()
|
D | traps.c | 931 if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) { in mt_ase_fp_affinity() 935 = current->cpus_allowed; in mt_ase_fp_affinity() 936 cpus_and(tmask, current->cpus_allowed, in mt_ase_fp_affinity()
|
/linux-3.4.99/arch/sparc/kernel/ |
D | us3_cpufreq.c | 81 cpumask_t cpus_allowed; in us3_freq_get() local 88 cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); in us3_freq_get() 94 set_cpus_allowed_ptr(current, &cpus_allowed); in us3_freq_get() 102 cpumask_t cpus_allowed; in us3_set_cpu_divider_index() local 108 cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); in us3_set_cpu_divider_index() 143 set_cpus_allowed_ptr(current, &cpus_allowed); in us3_set_cpu_divider_index()
|
D | us2e_cpufreq.c | 234 cpumask_t cpus_allowed; in us2e_freq_get() local 240 cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); in us2e_freq_get() 246 set_cpus_allowed_ptr(current, &cpus_allowed); in us2e_freq_get() 255 cpumask_t cpus_allowed; in us2e_set_cpu_divider_index() local 261 cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); in us2e_set_cpu_divider_index() 284 set_cpus_allowed_ptr(current, &cpus_allowed); in us2e_set_cpu_divider_index()
|
/linux-3.4.99/arch/mips/kernel/cpufreq/ |
D | loongson2_cpufreq.c | 60 cpumask_t cpus_allowed; in loongson2_cpufreq_target() local 67 cpus_allowed = current->cpus_allowed; in loongson2_cpufreq_target() 94 set_cpus_allowed_ptr(current, &cpus_allowed); in loongson2_cpufreq_target()
|
/linux-3.4.99/arch/sh/kernel/ |
D | cpufreq.c | 49 cpumask_t cpus_allowed; in sh_cpufreq_target() local 57 cpus_allowed = current->cpus_allowed; in sh_cpufreq_target() 78 set_cpus_allowed_ptr(current, &cpus_allowed); in sh_cpufreq_target()
|
/linux-3.4.99/kernel/sched/ |
D | cpupri.c | 103 if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) in cpupri_find() 107 cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); in cpupri_find()
|
D | core.c | 4553 cpumask_var_t cpus_allowed, new_mask; in sched_setaffinity() local 4571 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { in sched_setaffinity() 4587 cpuset_cpus_allowed(p, cpus_allowed); in sched_setaffinity() 4588 cpumask_and(new_mask, in_mask, cpus_allowed); in sched_setaffinity() 4593 cpuset_cpus_allowed(p, cpus_allowed); in sched_setaffinity() 4594 if (!cpumask_subset(new_mask, cpus_allowed)) { in sched_setaffinity() 4600 cpumask_copy(new_mask, cpus_allowed); in sched_setaffinity() 4607 free_cpumask_var(cpus_allowed); in sched_setaffinity() 4666 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); in sched_getaffinity() 5157 cpumask_copy(&p->cpus_allowed, new_mask); in do_set_cpus_allowed() [all …]
|
/linux-3.4.99/kernel/trace/ |
D | trace_workqueue.c | 56 int cpu = cpumask_first(&wq_thread->cpus_allowed); in probe_workqueue_insertion() 78 int cpu = cpumask_first(&wq_thread->cpus_allowed); in probe_workqueue_execution() 124 int cpu = cpumask_first(&wq_thread->cpus_allowed); in probe_workqueue_destruction()
|
/linux-3.4.99/arch/tile/include/asm/ |
D | setup.h | 51 if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \
|
/linux-3.4.99/arch/mips/include/asm/ |
D | switch_to.h | 52 prev->cpus_allowed = prev->thread.user_cpus_allowed; \
|
/linux-3.4.99/arch/tile/kernel/ |
D | hardwall.c | 428 if (cpumask_weight(&p->cpus_allowed) != 1) in hardwall_activate() 433 BUG_ON(cpumask_first(&p->cpus_allowed) != cpu); in hardwall_activate() 466 if (cpumask_weight(&task->cpus_allowed) != 1) { in _hardwall_deactivate() 470 cpumask_weight(&task->cpus_allowed)); in _hardwall_deactivate()
|
/linux-3.4.99/arch/ia64/kernel/cpufreq/ |
D | acpi-cpufreq.c | 113 saved_mask = current->cpus_allowed; in processor_get_freq() 151 saved_mask = current->cpus_allowed; in processor_set_freq()
|
/linux-3.4.99/arch/x86/kernel/cpu/mcheck/ |
D | mce_intel.c | 180 cpumask_copy(old, &current->cpus_allowed); in cmci_rediscover()
|
/linux-3.4.99/include/linux/ |
D | init_task.h | 160 .cpus_allowed = CPU_MASK_ALL, \
|
/linux-3.4.99/fs/proc/ |
D | array.c | 333 seq_cpumask(m, &task->cpus_allowed); in task_cpus_allowed() 336 seq_cpumask_list(m, &task->cpus_allowed); in task_cpus_allowed()
|
/linux-3.4.99/arch/powerpc/platforms/cell/spufs/ |
D | spufs.h | 127 cpumask_t cpus_allowed; member
|
D | sched.c | 143 cpumask_copy(&ctx->cpus_allowed, tsk_cpus_allowed(current)); in __spu_update_sched_info() 172 if (cpumask_intersects(mask, &ctx->cpus_allowed)) in __node_allowed()
|
/linux-3.4.99/arch/ia64/kernel/ |
D | topology.c | 365 oldmask = current->cpus_allowed; in cache_add_dev()
|
D | salinfo.c | 407 cpumask_t save_cpus_allowed = current->cpus_allowed; in call_on_cpu()
|
/linux-3.4.99/drivers/firmware/ |
D | dcdbas.c | 261 cpumask_copy(old_mask, &current->cpus_allowed); in dcdbas_smi_request()
|
/linux-3.4.99/Documentation/cgroups/ |
D | cpusets.txt | 58 schedule a task on a CPU that is not allowed in its cpus_allowed 158 displaying the task's cpus_allowed (on which CPUs it may be scheduled) 474 (makes sure that all the CPUs in the cpus_allowed of that cpuset are 648 their cpus_allowed to allow all online CPUs. When memory hotplug
|