Lines matching references to the identifier cs (kernel/cgroup/cpuset.c)
228 static inline struct cpuset *parent_cs(struct cpuset *cs) in parent_cs() argument
230 return css_cs(cs->css.parent); in parent_cs()
235 struct cpuset *cs = task_cs(p); in inc_dl_tasks_cs() local
237 cs->nr_deadline_tasks++; in inc_dl_tasks_cs()
242 struct cpuset *cs = task_cs(p); in dec_dl_tasks_cs() local
244 cs->nr_deadline_tasks--; in dec_dl_tasks_cs()
260 static inline bool is_cpuset_online(struct cpuset *cs) in is_cpuset_online() argument
262 return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); in is_cpuset_online()
265 static inline int is_cpu_exclusive(const struct cpuset *cs) in is_cpu_exclusive() argument
267 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); in is_cpu_exclusive()
270 static inline int is_mem_exclusive(const struct cpuset *cs) in is_mem_exclusive() argument
272 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); in is_mem_exclusive()
275 static inline int is_mem_hardwall(const struct cpuset *cs) in is_mem_hardwall() argument
277 return test_bit(CS_MEM_HARDWALL, &cs->flags); in is_mem_hardwall()
280 static inline int is_sched_load_balance(const struct cpuset *cs) in is_sched_load_balance() argument
282 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in is_sched_load_balance()
285 static inline int is_memory_migrate(const struct cpuset *cs) in is_memory_migrate() argument
287 return test_bit(CS_MEMORY_MIGRATE, &cs->flags); in is_memory_migrate()
290 static inline int is_spread_page(const struct cpuset *cs) in is_spread_page() argument
292 return test_bit(CS_SPREAD_PAGE, &cs->flags); in is_spread_page()
295 static inline int is_spread_slab(const struct cpuset *cs) in is_spread_slab() argument
297 return test_bit(CS_SPREAD_SLAB, &cs->flags); in is_spread_slab()
300 static inline int is_partition_valid(const struct cpuset *cs) in is_partition_valid() argument
302 return cs->partition_root_state > 0; in is_partition_valid()
305 static inline int is_partition_invalid(const struct cpuset *cs) in is_partition_invalid() argument
307 return cs->partition_root_state < 0; in is_partition_invalid()
313 static inline void make_partition_invalid(struct cpuset *cs) in make_partition_invalid() argument
315 if (is_partition_valid(cs)) in make_partition_invalid()
316 cs->partition_root_state = -cs->partition_root_state; in make_partition_invalid()
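The sign convention used by the partition helpers above (a positive partition_root_state is a valid partition root, a negative one the matching invalid state, zero an ordinary member) can be exercised in isolation. A minimal stand-alone sketch, using a toy struct rather than the kernel's struct cpuset and PRS_* constants:

#include <stdio.h>

struct cs_sketch {
        int partition_root_state;       /* >0 valid, <0 invalid, 0 plain member */
};

static int is_partition_valid(const struct cs_sketch *cs)
{
        return cs->partition_root_state > 0;
}

static void make_partition_invalid(struct cs_sketch *cs)
{
        /* Negating keeps the magnitude, so the old partition type stays recoverable. */
        if (is_partition_valid(cs))
                cs->partition_root_state = -cs->partition_root_state;
}

int main(void)
{
        struct cs_sketch cs = { .partition_root_state = 1 };   /* stands in for a valid root */

        make_partition_invalid(&cs);
        printf("state=%d valid=%d\n", cs.partition_root_state, is_partition_valid(&cs));
        return 0;
}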
322 static inline void notify_partition_change(struct cpuset *cs, int old_prs) in notify_partition_change() argument
324 if (old_prs == cs->partition_root_state) in notify_partition_change()
326 cgroup_file_notify(&cs->partition_file); in notify_partition_change()
329 if (is_partition_valid(cs)) in notify_partition_change()
330 WRITE_ONCE(cs->prs_err, PERR_NONE); in notify_partition_change()
464 static inline bool partition_is_populated(struct cpuset *cs, in partition_is_populated() argument
470 if (cs->css.cgroup->nr_populated_csets) in partition_is_populated()
472 if (!excluded_child && !cs->nr_subparts_cpus) in partition_is_populated()
473 return cgroup_is_populated(cs->css.cgroup); in partition_is_populated()
476 cpuset_for_each_child(child, css, cs) { in partition_is_populated()
505 struct cpuset *cs; in guarantee_online_cpus() local
511 cs = task_cs(tsk); in guarantee_online_cpus()
513 while (!cpumask_intersects(cs->effective_cpus, pmask)) { in guarantee_online_cpus()
514 cs = parent_cs(cs); in guarantee_online_cpus()
515 if (unlikely(!cs)) { in guarantee_online_cpus()
526 cpumask_and(pmask, pmask, cs->effective_cpus); in guarantee_online_cpus()
543 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) in guarantee_online_mems() argument
545 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) in guarantee_online_mems()
546 cs = parent_cs(cs); in guarantee_online_mems()
547 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); in guarantee_online_mems()
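guarantee_online_cpus() and guarantee_online_mems() above both climb toward the root cpuset until the effective mask still intersects what is actually online, then hand back that intersection. A toy, self-contained sketch of the memory-node variant, with plain bitmasks standing in for nodemask_t and node_states[N_MEMORY]:

#include <stdio.h>

struct cs_sketch {
        unsigned long effective_mems;   /* toy bitmask of memory nodes */
        struct cs_sketch *parent;
};

static unsigned long guarantee_online_mems(struct cs_sketch *cs,
                                           unsigned long online_nodes)
{
        /* climb until the effective nodes intersect the online set; the root always does */
        while (!(cs->effective_mems & online_nodes) && cs->parent)
                cs = cs->parent;
        return cs->effective_mems & online_nodes;
}

int main(void)
{
        struct cs_sketch root  = { .effective_mems = 0x3, .parent = NULL };
        struct cs_sketch child = { .effective_mems = 0x4, .parent = &root };

        /* node 2 is offline here, so the walk falls back to the ancestor's nodes */
        printf("%#lx\n", guarantee_online_mems(&child, 0x3));
        return 0;
}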
556 static void cpuset_update_task_spread_flags(struct cpuset *cs, in cpuset_update_task_spread_flags() argument
562 if (is_spread_page(cs)) in cpuset_update_task_spread_flags()
567 if (is_spread_slab(cs)) in cpuset_update_task_spread_flags()
597 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in alloc_cpumasks() argument
601 if (cs) { in alloc_cpumasks()
602 pmask1 = &cs->cpus_allowed; in alloc_cpumasks()
603 pmask2 = &cs->effective_cpus; in alloc_cpumasks()
604 pmask3 = &cs->subparts_cpus; in alloc_cpumasks()
634 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in free_cpumasks() argument
636 if (cs) { in free_cpumasks()
637 free_cpumask_var(cs->cpus_allowed); in free_cpumasks()
638 free_cpumask_var(cs->effective_cpus); in free_cpumasks()
639 free_cpumask_var(cs->subparts_cpus); in free_cpumasks()
652 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) in alloc_trial_cpuset() argument
656 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); in alloc_trial_cpuset()
665 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); in alloc_trial_cpuset()
666 cpumask_copy(trial->effective_cpus, cs->effective_cpus); in alloc_trial_cpuset()
674 static inline void free_cpuset(struct cpuset *cs) in free_cpuset() argument
676 free_cpumasks(cs, NULL); in free_cpuset()
677 kfree(cs); in free_cpuset()
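alloc_trial_cpuset() and free_cpuset() support the copy-modify-validate pattern that update_cpumask(), update_nodemask() and update_flag() follow later in the listing: duplicate the cpuset, change only the duplicate, run validate_change() against the original, and commit on success. A hedged user-space sketch of that flow; the struct, the bitmask type and the rejection rule inside validate_change() are placeholders, not the kernel's:

#include <stdlib.h>
#include <string.h>

struct cs_sketch {
        unsigned long cpus_allowed;     /* toy stand-in for a cpumask */
        unsigned long flags;
};

/* placeholder for the kernel's validate_change(cur, trial) */
static int validate_change(const struct cs_sketch *cur, const struct cs_sketch *trial)
{
        (void)cur;
        return trial->cpus_allowed ? 0 : -1;    /* e.g. reject an empty mask */
}

static int update_cpus(struct cs_sketch *cs, unsigned long new_mask)
{
        struct cs_sketch *trial = malloc(sizeof(*trial));
        int err;

        if (!trial)
                return -1;
        memcpy(trial, cs, sizeof(*trial));      /* mirrors kmemdup() in alloc_trial_cpuset() */

        trial->cpus_allowed = new_mask;         /* modify the copy, not the live cpuset */
        err = validate_change(cs, trial);
        if (!err)
                cs->cpus_allowed = trial->cpus_allowed; /* commit only after validation */

        free(trial);                            /* free_cpuset() analogue */
        return err;
}

int main(void)
{
        struct cs_sketch cs = { .cpus_allowed = 0xf };

        return update_cpus(&cs, 0x3);
}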
1069 static void dl_update_tasks_root_domain(struct cpuset *cs) in dl_update_tasks_root_domain() argument
1074 if (cs->nr_deadline_tasks == 0) in dl_update_tasks_root_domain()
1077 css_task_iter_start(&cs->css, 0, &it); in dl_update_tasks_root_domain()
1087 struct cpuset *cs = NULL; in dl_rebuild_rd_accounting() local
1102 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in dl_rebuild_rd_accounting()
1104 if (cpumask_empty(cs->effective_cpus)) { in dl_rebuild_rd_accounting()
1109 css_get(&cs->css); in dl_rebuild_rd_accounting()
1113 dl_update_tasks_root_domain(cs); in dl_rebuild_rd_accounting()
1116 css_put(&cs->css); in dl_rebuild_rd_accounting()
1147 struct cpuset *cs; in rebuild_sched_domains_locked() local
1173 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in rebuild_sched_domains_locked()
1174 if (!is_partition_valid(cs)) { in rebuild_sched_domains_locked()
1178 if (!cpumask_subset(cs->effective_cpus, in rebuild_sched_domains_locked()
1219 static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus) in update_tasks_cpumask() argument
1223 bool top_cs = cs == &top_cpuset; in update_tasks_cpumask()
1225 css_task_iter_start(&cs->css, 0, &it); in update_tasks_cpumask()
1235 cpumask_andnot(new_cpus, possible_mask, cs->subparts_cpus); in update_tasks_cpumask()
1237 cpumask_and(new_cpus, possible_mask, cs->effective_cpus); in update_tasks_cpumask()
1256 struct cpuset *cs, struct cpuset *parent) in compute_effective_cpumask() argument
1258 if (parent->nr_subparts_cpus && is_partition_valid(cs)) { in compute_effective_cpumask()
1261 cpumask_and(new_cpus, new_cpus, cs->cpus_allowed); in compute_effective_cpumask()
1264 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus); in compute_effective_cpumask()
1278 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1280 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1288 static int update_partition_exclusive(struct cpuset *cs, int new_prs) in update_partition_exclusive() argument
1292 if (exclusive && !is_cpu_exclusive(cs)) { in update_partition_exclusive()
1293 if (update_flag(CS_CPU_EXCLUSIVE, cs, 1)) in update_partition_exclusive()
1295 } else if (!exclusive && is_cpu_exclusive(cs)) { in update_partition_exclusive()
1297 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_partition_exclusive()
1309 static void update_partition_sd_lb(struct cpuset *cs, int old_prs) in update_partition_sd_lb() argument
1311 int new_prs = cs->partition_root_state; in update_partition_sd_lb()
1322 new_lb = is_sched_load_balance(parent_cs(cs)); in update_partition_sd_lb()
1324 if (new_lb != !!is_sched_load_balance(cs)) { in update_partition_sd_lb()
1327 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in update_partition_sd_lb()
1329 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in update_partition_sd_lb()
1374 static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd, in update_parent_subparts_cpumask() argument
1378 struct cpuset *parent = parent_cs(cs); in update_parent_subparts_cpumask()
1395 if (!newmask && cpumask_empty(cs->cpus_allowed)) in update_parent_subparts_cpumask()
1403 old_prs = new_prs = cs->partition_root_state; in update_parent_subparts_cpumask()
1409 if (!cpumask_intersects(cs->cpus_allowed, parent->cpus_allowed)) in update_parent_subparts_cpumask()
1416 if (cpumask_subset(parent->effective_cpus, cs->cpus_allowed) && in update_parent_subparts_cpumask()
1417 partition_is_populated(parent, cs)) in update_parent_subparts_cpumask()
1420 cpumask_copy(tmp->addmask, cs->cpus_allowed); in update_parent_subparts_cpumask()
1428 cpumask_and(tmp->delmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1438 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1454 cpumask_andnot(tmp->delmask, cs->cpus_allowed, newmask); in update_parent_subparts_cpumask()
1473 partition_is_populated(parent, cs)) { in update_parent_subparts_cpumask()
1476 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1498 cpumask_and(tmp->addmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1503 if ((is_partition_valid(cs) && !parent->nr_subparts_cpus) || in update_parent_subparts_cpumask()
1506 partition_is_populated(parent, cs))) { in update_parent_subparts_cpumask()
1511 if (part_error && is_partition_valid(cs) && in update_parent_subparts_cpumask()
1513 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1517 WRITE_ONCE(cs->prs_err, part_error); in update_parent_subparts_cpumask()
1524 switch (cs->partition_root_state) { in update_parent_subparts_cpumask()
1546 int err = update_partition_exclusive(cs, new_prs); in update_parent_subparts_cpumask()
1578 cs->partition_root_state = new_prs; in update_parent_subparts_cpumask()
1585 update_sibling_cpumasks(parent, cs, tmp); in update_parent_subparts_cpumask()
1595 update_partition_sd_lb(cs, old_prs); in update_parent_subparts_cpumask()
1599 notify_partition_change(cs, old_prs); in update_parent_subparts_cpumask()
1622 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, in update_cpumasks_hier() argument
1631 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_cpumasks_hier()
1682 if ((cp != cs) && old_prs) { in update_cpumasks_hier()
1793 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, in update_sibling_cpumasks() argument
1813 if (sibling == cs) in update_sibling_cpumasks()
1834 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, in update_cpumask() argument
1840 int old_prs = cs->partition_root_state; in update_cpumask()
1843 if (cs == &top_cpuset) in update_cpumask()
1865 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) in update_cpumask()
1871 retval = validate_change(cs, trialcs); in update_cpumask()
1886 parent = parent_cs(cs); in update_cpumask()
1900 if (cs->partition_root_state) { in update_cpumask()
1902 update_parent_subparts_cpumask(cs, partcmd_invalidate, in update_cpumask()
1905 update_parent_subparts_cpumask(cs, partcmd_update, in update_cpumask()
1910 parent_cs(cs)); in update_cpumask()
1912 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); in update_cpumask()
1919 if (cs->nr_subparts_cpus) { in update_cpumask()
1920 if (!is_partition_valid(cs) || in update_cpumask()
1921 (cpumask_subset(trialcs->effective_cpus, cs->subparts_cpus) && in update_cpumask()
1922 partition_is_populated(cs, NULL))) { in update_cpumask()
1923 cs->nr_subparts_cpus = 0; in update_cpumask()
1924 cpumask_clear(cs->subparts_cpus); in update_cpumask()
1926 cpumask_and(cs->subparts_cpus, cs->subparts_cpus, in update_cpumask()
1927 cs->cpus_allowed); in update_cpumask()
1928 cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus); in update_cpumask()
1934 update_cpumasks_hier(cs, &tmp, 0); in update_cpumask()
1936 if (cs->partition_root_state) { in update_cpumask()
1937 struct cpuset *parent = parent_cs(cs); in update_cpumask()
1944 update_sibling_cpumasks(parent, cs, &tmp); in update_cpumask()
1947 update_partition_sd_lb(cs, old_prs); in update_cpumask()
2045 static void update_tasks_nodemask(struct cpuset *cs) in update_tasks_nodemask() argument
2051 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ in update_tasks_nodemask()
2053 guarantee_online_mems(cs, &newmems); in update_tasks_nodemask()
2065 css_task_iter_start(&cs->css, 0, &it); in update_tasks_nodemask()
2076 migrate = is_memory_migrate(cs); in update_tasks_nodemask()
2078 mpol_rebind_mm(mm, &cs->mems_allowed); in update_tasks_nodemask()
2080 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); in update_tasks_nodemask()
2090 cs->old_mems_allowed = newmems; in update_tasks_nodemask()
2108 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) in update_nodemasks_hier() argument
2114 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_nodemasks_hier()
2164 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, in update_nodemask() argument
2173 if (cs == &top_cpuset) { in update_nodemask()
2198 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { in update_nodemask()
2202 retval = validate_change(cs, trialcs); in update_nodemask()
2209 cs->mems_allowed = trialcs->mems_allowed; in update_nodemask()
2213 update_nodemasks_hier(cs, &trialcs->mems_allowed); in update_nodemask()
2229 static int update_relax_domain_level(struct cpuset *cs, s64 val) in update_relax_domain_level() argument
2236 if (val != cs->relax_domain_level) { in update_relax_domain_level()
2237 cs->relax_domain_level = val; in update_relax_domain_level()
2238 if (!cpumask_empty(cs->cpus_allowed) && in update_relax_domain_level()
2239 is_sched_load_balance(cs)) in update_relax_domain_level()
2254 static void update_tasks_flags(struct cpuset *cs) in update_tasks_flags() argument
2259 css_task_iter_start(&cs->css, 0, &it); in update_tasks_flags()
2261 cpuset_update_task_spread_flags(cs, task); in update_tasks_flags()
2274 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, in update_flag() argument
2282 trialcs = alloc_trial_cpuset(cs); in update_flag()
2291 err = validate_change(cs, trialcs); in update_flag()
2295 balance_flag_changed = (is_sched_load_balance(cs) != in update_flag()
2298 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) in update_flag()
2299 || (is_spread_page(cs) != is_spread_page(trialcs))); in update_flag()
2302 cs->flags = trialcs->flags; in update_flag()
2309 update_tasks_flags(cs); in update_flag()
2323 static int update_prstate(struct cpuset *cs, int new_prs) in update_prstate() argument
2325 int err = PERR_NONE, old_prs = cs->partition_root_state; in update_prstate()
2326 struct cpuset *parent = parent_cs(cs); in update_prstate()
2337 cs->partition_root_state = -new_prs; in update_prstate()
2344 err = update_partition_exclusive(cs, new_prs); in update_prstate()
2352 if (cpumask_empty(cs->cpus_allowed)) { in update_prstate()
2357 err = update_parent_subparts_cpumask(cs, partcmd_enable, in update_prstate()
2369 update_parent_subparts_cpumask(cs, partcmd_disable, NULL, in update_prstate()
2375 if (unlikely(cs->nr_subparts_cpus)) { in update_prstate()
2377 cs->nr_subparts_cpus = 0; in update_prstate()
2378 cpumask_clear(cs->subparts_cpus); in update_prstate()
2379 compute_effective_cpumask(cs->effective_cpus, cs, parent); in update_prstate()
2390 update_partition_exclusive(cs, new_prs); in update_prstate()
2394 cs->partition_root_state = new_prs; in update_prstate()
2395 WRITE_ONCE(cs->prs_err, err); in update_prstate()
2402 if (!list_empty(&cs->css.children)) in update_prstate()
2403 update_cpumasks_hier(cs, &tmpmask, !new_prs ? HIER_CHECKALL : 0); in update_prstate()
2406 update_partition_sd_lb(cs, old_prs); in update_prstate()
2408 notify_partition_change(cs, old_prs); in update_prstate()
2522 static int cpuset_can_attach_check(struct cpuset *cs) in cpuset_can_attach_check() argument
2524 if (cpumask_empty(cs->effective_cpus) || in cpuset_can_attach_check()
2525 (!is_in_v2_mode() && nodes_empty(cs->mems_allowed))) in cpuset_can_attach_check()
2530 static void reset_migrate_dl_data(struct cpuset *cs) in reset_migrate_dl_data() argument
2532 cs->nr_migrate_dl_tasks = 0; in reset_migrate_dl_data()
2533 cs->sum_migrate_dl_bw = 0; in reset_migrate_dl_data()
2540 struct cpuset *cs, *oldcs; in cpuset_can_attach() local
2548 cs = css_cs(css); in cpuset_can_attach()
2553 ret = cpuset_can_attach_check(cs); in cpuset_can_attach()
2557 cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus); in cpuset_can_attach()
2558 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems); in cpuset_can_attach()
2578 cs->nr_migrate_dl_tasks++; in cpuset_can_attach()
2579 cs->sum_migrate_dl_bw += task->dl.dl_bw; in cpuset_can_attach()
2583 if (!cs->nr_migrate_dl_tasks) in cpuset_can_attach()
2586 if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) { in cpuset_can_attach()
2587 int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus); in cpuset_can_attach()
2590 reset_migrate_dl_data(cs); in cpuset_can_attach()
2595 ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw); in cpuset_can_attach()
2597 reset_migrate_dl_data(cs); in cpuset_can_attach()
2607 cs->attach_in_progress++; in cpuset_can_attach()
2616 struct cpuset *cs; in cpuset_cancel_attach() local
2619 cs = css_cs(css); in cpuset_cancel_attach()
2622 cs->attach_in_progress--; in cpuset_cancel_attach()
2623 if (!cs->attach_in_progress) in cpuset_cancel_attach()
2626 if (cs->nr_migrate_dl_tasks) { in cpuset_cancel_attach()
2627 int cpu = cpumask_any(cs->effective_cpus); in cpuset_cancel_attach()
2629 dl_bw_free(cpu, cs->sum_migrate_dl_bw); in cpuset_cancel_attach()
2630 reset_migrate_dl_data(cs); in cpuset_cancel_attach()
2644 static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task) in cpuset_attach_task() argument
2648 if (cs != &top_cpuset) in cpuset_attach_task()
2652 cs->subparts_cpus); in cpuset_attach_task()
2660 cpuset_update_task_spread_flags(cs, task); in cpuset_attach_task()
2668 struct cpuset *cs; in cpuset_attach() local
2673 cs = css_cs(css); in cpuset_attach()
2677 cpus_updated = !cpumask_equal(cs->effective_cpus, in cpuset_attach()
2679 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems); in cpuset_attach()
2689 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
2693 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); in cpuset_attach()
2696 cpuset_attach_task(cs, task); in cpuset_attach()
2704 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
2705 if (!is_memory_migrate(cs) && !mems_updated) in cpuset_attach()
2722 if (is_memory_migrate(cs)) in cpuset_attach()
2731 cs->old_mems_allowed = cpuset_attach_nodemask_to; in cpuset_attach()
2733 if (cs->nr_migrate_dl_tasks) { in cpuset_attach()
2734 cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks; in cpuset_attach()
2735 oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks; in cpuset_attach()
2736 reset_migrate_dl_data(cs); in cpuset_attach()
2739 cs->attach_in_progress--; in cpuset_attach()
2740 if (!cs->attach_in_progress) in cpuset_attach()
2770 struct cpuset *cs = css_cs(css); in cpuset_write_u64() local
2776 if (!is_cpuset_online(cs)) { in cpuset_write_u64()
2783 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); in cpuset_write_u64()
2786 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); in cpuset_write_u64()
2789 retval = update_flag(CS_MEM_HARDWALL, cs, val); in cpuset_write_u64()
2792 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); in cpuset_write_u64()
2795 retval = update_flag(CS_MEMORY_MIGRATE, cs, val); in cpuset_write_u64()
2801 retval = update_flag(CS_SPREAD_PAGE, cs, val); in cpuset_write_u64()
2804 retval = update_flag(CS_SPREAD_SLAB, cs, val); in cpuset_write_u64()
2819 struct cpuset *cs = css_cs(css); in cpuset_write_s64() local
2825 if (!is_cpuset_online(cs)) in cpuset_write_s64()
2830 retval = update_relax_domain_level(cs, val); in cpuset_write_s64()
2848 struct cpuset *cs = css_cs(of_css(of)); in cpuset_write_resmask() local
2873 css_get(&cs->css); in cpuset_write_resmask()
2879 if (!is_cpuset_online(cs)) in cpuset_write_resmask()
2882 trialcs = alloc_trial_cpuset(cs); in cpuset_write_resmask()
2890 retval = update_cpumask(cs, trialcs, buf); in cpuset_write_resmask()
2893 retval = update_nodemask(cs, trialcs, buf); in cpuset_write_resmask()
2905 css_put(&cs->css); in cpuset_write_resmask()
2920 struct cpuset *cs = css_cs(seq_css(sf)); in cpuset_common_seq_show() local
2928 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); in cpuset_common_seq_show()
2931 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); in cpuset_common_seq_show()
2934 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); in cpuset_common_seq_show()
2937 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); in cpuset_common_seq_show()
2940 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus)); in cpuset_common_seq_show()
2952 struct cpuset *cs = css_cs(css); in cpuset_read_u64() local
2956 return is_cpu_exclusive(cs); in cpuset_read_u64()
2958 return is_mem_exclusive(cs); in cpuset_read_u64()
2960 return is_mem_hardwall(cs); in cpuset_read_u64()
2962 return is_sched_load_balance(cs); in cpuset_read_u64()
2964 return is_memory_migrate(cs); in cpuset_read_u64()
2968 return fmeter_getrate(&cs->fmeter); in cpuset_read_u64()
2970 return is_spread_page(cs); in cpuset_read_u64()
2972 return is_spread_slab(cs); in cpuset_read_u64()
2983 struct cpuset *cs = css_cs(css); in cpuset_read_s64() local
2987 return cs->relax_domain_level; in cpuset_read_s64()
2998 struct cpuset *cs = css_cs(seq_css(seq)); in sched_partition_show() local
3001 switch (cs->partition_root_state) { in sched_partition_show()
3017 err = perr_strings[READ_ONCE(cs->prs_err)]; in sched_partition_show()
3030 struct cpuset *cs = css_cs(of_css(of)); in sched_partition_write() local
3048 css_get(&cs->css); in sched_partition_write()
3051 if (!is_cpuset_online(cs)) in sched_partition_write()
3054 retval = update_prstate(cs, val); in sched_partition_write()
3058 css_put(&cs->css); in sched_partition_write()
3235 struct cpuset *cs; in cpuset_css_alloc() local
3240 cs = kzalloc(sizeof(*cs), GFP_KERNEL); in cpuset_css_alloc()
3241 if (!cs) in cpuset_css_alloc()
3244 if (alloc_cpumasks(cs, NULL)) { in cpuset_css_alloc()
3245 kfree(cs); in cpuset_css_alloc()
3249 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_alloc()
3250 nodes_clear(cs->mems_allowed); in cpuset_css_alloc()
3251 nodes_clear(cs->effective_mems); in cpuset_css_alloc()
3252 fmeter_init(&cs->fmeter); in cpuset_css_alloc()
3253 cs->relax_domain_level = -1; in cpuset_css_alloc()
3257 __set_bit(CS_MEMORY_MIGRATE, &cs->flags); in cpuset_css_alloc()
3259 return &cs->css; in cpuset_css_alloc()
3264 struct cpuset *cs = css_cs(css); in cpuset_css_online() local
3265 struct cpuset *parent = parent_cs(cs); in cpuset_css_online()
3275 set_bit(CS_ONLINE, &cs->flags); in cpuset_css_online()
3277 set_bit(CS_SPREAD_PAGE, &cs->flags); in cpuset_css_online()
3279 set_bit(CS_SPREAD_SLAB, &cs->flags); in cpuset_css_online()
3285 cpumask_copy(cs->effective_cpus, parent->effective_cpus); in cpuset_css_online()
3286 cs->effective_mems = parent->effective_mems; in cpuset_css_online()
3287 cs->use_parent_ecpus = true; in cpuset_css_online()
3296 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_online()
3326 cs->mems_allowed = parent->mems_allowed; in cpuset_css_online()
3327 cs->effective_mems = parent->mems_allowed; in cpuset_css_online()
3328 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); in cpuset_css_online()
3329 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); in cpuset_css_online()
3350 struct cpuset *cs = css_cs(css); in cpuset_css_offline() local
3355 if (is_partition_valid(cs)) in cpuset_css_offline()
3356 update_prstate(cs, 0); in cpuset_css_offline()
3359 is_sched_load_balance(cs)) in cpuset_css_offline()
3360 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); in cpuset_css_offline()
3362 if (cs->use_parent_ecpus) { in cpuset_css_offline()
3363 struct cpuset *parent = parent_cs(cs); in cpuset_css_offline()
3365 cs->use_parent_ecpus = false; in cpuset_css_offline()
3370 clear_bit(CS_ONLINE, &cs->flags); in cpuset_css_offline()
3378 struct cpuset *cs = css_cs(css); in cpuset_css_free() local
3380 free_cpuset(cs); in cpuset_css_free()
3407 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]); in cpuset_can_fork() local
3412 same_cs = (cs == task_cs(current)); in cpuset_can_fork()
3422 ret = cpuset_can_attach_check(cs); in cpuset_can_fork()
3438 cs->attach_in_progress++; in cpuset_can_fork()
3446 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]); in cpuset_cancel_fork() local
3450 same_cs = (cs == task_cs(current)); in cpuset_cancel_fork()
3457 cs->attach_in_progress--; in cpuset_cancel_fork()
3458 if (!cs->attach_in_progress) in cpuset_cancel_fork()
3470 struct cpuset *cs; in cpuset_fork() local
3474 cs = task_cs(task); in cpuset_fork()
3475 same_cs = (cs == task_cs(current)); in cpuset_fork()
3479 if (cs == &top_cpuset) in cpuset_fork()
3489 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); in cpuset_fork()
3490 cpuset_attach_task(cs, task); in cpuset_fork()
3492 cs->attach_in_progress--; in cpuset_fork()
3493 if (!cs->attach_in_progress) in cpuset_fork()
3551 static void remove_tasks_in_empty_cpuset(struct cpuset *cs) in remove_tasks_in_empty_cpuset() argument
3559 parent = parent_cs(cs); in remove_tasks_in_empty_cpuset()
3564 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { in remove_tasks_in_empty_cpuset()
3566 pr_cont_cgroup_name(cs->css.cgroup); in remove_tasks_in_empty_cpuset()
3572 hotplug_update_tasks_legacy(struct cpuset *cs, in hotplug_update_tasks_legacy() argument
3579 cpumask_copy(cs->cpus_allowed, new_cpus); in hotplug_update_tasks_legacy()
3580 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks_legacy()
3581 cs->mems_allowed = *new_mems; in hotplug_update_tasks_legacy()
3582 cs->effective_mems = *new_mems; in hotplug_update_tasks_legacy()
3589 if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) in hotplug_update_tasks_legacy()
3590 update_tasks_cpumask(cs, new_cpus); in hotplug_update_tasks_legacy()
3591 if (mems_updated && !nodes_empty(cs->mems_allowed)) in hotplug_update_tasks_legacy()
3592 update_tasks_nodemask(cs); in hotplug_update_tasks_legacy()
3594 is_empty = cpumask_empty(cs->cpus_allowed) || in hotplug_update_tasks_legacy()
3595 nodes_empty(cs->mems_allowed); in hotplug_update_tasks_legacy()
3604 remove_tasks_in_empty_cpuset(cs); in hotplug_update_tasks_legacy()
3610 hotplug_update_tasks(struct cpuset *cs, in hotplug_update_tasks() argument
3615 if (cpumask_empty(new_cpus) && !is_partition_valid(cs)) in hotplug_update_tasks()
3616 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); in hotplug_update_tasks()
3618 *new_mems = parent_cs(cs)->effective_mems; in hotplug_update_tasks()
3621 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks()
3622 cs->effective_mems = *new_mems; in hotplug_update_tasks()
3626 update_tasks_cpumask(cs, new_cpus); in hotplug_update_tasks()
3628 update_tasks_nodemask(cs); in hotplug_update_tasks()
3647 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) in cpuset_hotplug_update_tasks() argument
3655 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); in cpuset_hotplug_update_tasks()
3663 if (cs->attach_in_progress) { in cpuset_hotplug_update_tasks()
3668 parent = parent_cs(cs); in cpuset_hotplug_update_tasks()
3669 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3670 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); in cpuset_hotplug_update_tasks()
3672 if (cs->nr_subparts_cpus) in cpuset_hotplug_update_tasks()
3677 cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3679 if (!tmp || !cs->partition_root_state) in cpuset_hotplug_update_tasks()
3688 if (cs->nr_subparts_cpus && is_partition_valid(cs) && in cpuset_hotplug_update_tasks()
3689 cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)) { in cpuset_hotplug_update_tasks()
3691 cs->nr_subparts_cpus = 0; in cpuset_hotplug_update_tasks()
3692 cpumask_clear(cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3694 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3704 if (is_partition_valid(cs) && (!parent->nr_subparts_cpus || in cpuset_hotplug_update_tasks()
3705 (cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)))) { in cpuset_hotplug_update_tasks()
3708 update_parent_subparts_cpumask(cs, partcmd_disable, NULL, tmp); in cpuset_hotplug_update_tasks()
3709 if (cs->nr_subparts_cpus) { in cpuset_hotplug_update_tasks()
3711 cs->nr_subparts_cpus = 0; in cpuset_hotplug_update_tasks()
3712 cpumask_clear(cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3714 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3717 old_prs = cs->partition_root_state; in cpuset_hotplug_update_tasks()
3719 if (is_partition_valid(cs)) { in cpuset_hotplug_update_tasks()
3721 make_partition_invalid(cs); in cpuset_hotplug_update_tasks()
3724 WRITE_ONCE(cs->prs_err, PERR_INVPARENT); in cpuset_hotplug_update_tasks()
3726 WRITE_ONCE(cs->prs_err, PERR_NOTPART); in cpuset_hotplug_update_tasks()
3728 WRITE_ONCE(cs->prs_err, PERR_HOTPLUG); in cpuset_hotplug_update_tasks()
3729 notify_partition_change(cs, old_prs); in cpuset_hotplug_update_tasks()
3738 else if (is_partition_valid(parent) && is_partition_invalid(cs)) { in cpuset_hotplug_update_tasks()
3739 update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp); in cpuset_hotplug_update_tasks()
3740 if (is_partition_valid(cs)) in cpuset_hotplug_update_tasks()
3745 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); in cpuset_hotplug_update_tasks()
3746 mems_updated = !nodes_equal(new_mems, cs->effective_mems); in cpuset_hotplug_update_tasks()
3754 hotplug_update_tasks(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3757 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3853 struct cpuset *cs; in cpuset_hotplug_workfn() local
3857 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in cpuset_hotplug_workfn()
3858 if (cs == &top_cpuset || !css_tryget_online(&cs->css)) in cpuset_hotplug_workfn()
3862 cpuset_hotplug_update_tasks(cs, ptmp); in cpuset_hotplug_workfn()
3865 css_put(&cs->css); in cpuset_hotplug_workfn()
3943 struct cpuset *cs; in cpuset_cpus_allowed() local
3948 cs = task_cs(tsk); in cpuset_cpus_allowed()
3949 if (cs != &top_cpuset) in cpuset_cpus_allowed()
3956 if ((cs == &top_cpuset) || cpumask_empty(pmask)) { in cpuset_cpus_allowed()
4066 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) in nearest_hardwall_ancestor() argument
4068 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) in nearest_hardwall_ancestor()
4069 cs = parent_cs(cs); in nearest_hardwall_ancestor()
4070 return cs; in nearest_hardwall_ancestor()
4115 struct cpuset *cs; /* current cpuset ancestors */ in cpuset_node_allowed() local
4139 cs = nearest_hardwall_ancestor(task_cs(current)); in cpuset_node_allowed()
4140 allowed = node_isset(node, cs->mems_allowed); in cpuset_node_allowed()
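nearest_hardwall_ancestor() walks up until it finds a mem_exclusive or mem_hardwall cpuset (or reaches the root), and cpuset_node_allowed() then checks the requested node against that ancestor's mems_allowed. A stand-alone sketch of the same walk, with toy flag bits and a bitmask in place of nodemask_t:

#include <stdio.h>

#define CS_MEM_EXCLUSIVE        0x1
#define CS_MEM_HARDWALL         0x2

struct cs_sketch {
        unsigned long flags;
        unsigned long mems_allowed;     /* toy bitmask of memory nodes */
        struct cs_sketch *parent;
};

static struct cs_sketch *nearest_hardwall_ancestor(struct cs_sketch *cs)
{
        while (!(cs->flags & (CS_MEM_EXCLUSIVE | CS_MEM_HARDWALL)) && cs->parent)
                cs = cs->parent;
        return cs;
}

static int node_allowed(struct cs_sketch *cs, int node)
{
        /* hardwall check is made against the nearest exclusive/hardwall ancestor */
        return !!(nearest_hardwall_ancestor(cs)->mems_allowed & (1UL << node));
}

int main(void)
{
        struct cs_sketch root  = { .flags = CS_MEM_EXCLUSIVE, .mems_allowed = 0x3 };
        struct cs_sketch child = { .flags = 0, .mems_allowed = 0x1, .parent = &root };

        printf("node 1 allowed: %d\n", node_allowed(&child, 1));
        return 0;
}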