Searched refs:sched_group (Results 1 – 6 of 6) sorted by relevance
78 struct sched_group;
91 struct sched_group *groups; /* the balancing groups of the domain */
191 struct sched_group *__percpu *sg;
36 struct sched_group *group = sd->groups; in sched_domain_debug_one()
604 static void free_sched_groups(struct sched_group *sg, int free_sgc) in free_sched_groups()
606 struct sched_group *tmp, *first; in free_sched_groups()
739 struct sched_group *sg = sd->groups; in cpu_attach_domain()
786 int group_balance_cpu(struct sched_group *sg) in group_balance_cpu()
898 build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask) in build_balance_mask()
934 static struct sched_group *
937 struct sched_group *sg; in build_group_from_child_sched_domain()
940 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), in build_group_from_child_sched_domain()
959 struct sched_group *sg) in init_overlap_sched_group()
[all …]
1199 struct sched_group;
1201 static inline struct cpumask *sched_group_span(struct sched_group *sg);
1277 struct sched_group *group) in sched_group_cookie_match()
1337 struct sched_group *group) in sched_group_cookie_match()
1833 struct sched_group { struct
1834 struct sched_group *next; /* Must be a circular list */ argument
1852 static inline struct cpumask *sched_group_span(struct sched_group *sg) in sched_group_span() argument
1860 static inline struct cpumask *group_balance_mask(struct sched_group *sg) in group_balance_mask()
1865 extern int group_balance_cpu(struct sched_group *sg);
6435 static struct sched_group *
6442 find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) in find_idlest_group_cpu()
6514 struct sched_group *group; in find_idlest_cpu()
8761 struct sched_group *busiest; /* Busiest group in this sd */
8762 struct sched_group *local; /* Local group in this sd */
8826 struct sched_group *sdg = sd->groups; in update_cpu_capacity()
8844 struct sched_group *group, *sdg = sd->groups; in update_group_capacity()
8949 static inline int sg_imbalanced(struct sched_group *group) in sg_imbalanced()
9010 struct sched_group *group, in group_classify()
9057 struct sched_group *sg) in asym_smt_can_pull_tasks()
[all …]
28 Each scheduling domain must have one or more CPU scheduling groups (struct sched_group), which take the form of a one-way circular linked list
19 Each scheduling domain must have one or more CPU groups (struct sched_group)
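The two documentation hits above state the invariant the other references implement: every sched_domain carries at least one struct sched_group, and the groups are chained through sched_group::next into a circular singly-linked list rooted at sched_domain::groups. Below is a minimal sketch of walking that list using only the fields visible in the snippets; the trimmed struct definitions and the visit_groups() helper are illustrative, not kernel code.

/* Illustrative, trimmed-down declarations (not the kernel's full structs). */
struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	/* capacity, span, etc. omitted */
};

struct sched_domain {
	struct sched_group *groups;	/* the balancing groups of the domain */
	/* flags, spans, parent/child links omitted */
};

/*
 * Visit every group of a domain exactly once: start at sd->groups and
 * follow ->next until the walk wraps back around to the first group.
 */
static void visit_groups(struct sched_domain *sd)
{
	struct sched_group *sg = sd->groups;

	do {
		/* examine or balance against sg here */
		sg = sg->next;
	} while (sg != sd->groups);
}

The fair.c hits listed above (for example update_group_capacity() and the find_idlest_* helpers) rely on the same wrap-around traversal of sd->groups.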