Searched refs:sched_group (Results 1 – 6 of 6) sorted by relevance
78  struct sched_group;
91  struct sched_group *groups;   /* the balancing groups of the domain */
191 struct sched_group *__percpu *sg;
38  struct sched_group *group = sd->groups;   in sched_domain_debug_one()
606 static void free_sched_groups(struct sched_group *sg, int free_sgc)   in free_sched_groups()
608 struct sched_group *tmp, *first;   in free_sched_groups()
745 struct sched_group *sg = sd->groups;   in cpu_attach_domain()
792 int group_balance_cpu(struct sched_group *sg)   in group_balance_cpu()
904 build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)   in build_balance_mask()
940 static struct sched_group *
943 struct sched_group *sg;   in build_group_from_child_sched_domain()
946 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),   in build_group_from_child_sched_domain()
965 struct sched_group *sg)   in init_overlap_sched_group()
[all …]
1215 struct sched_group;
1217 static inline struct cpumask *sched_group_span(struct sched_group *sg);
1295 struct sched_group *group)   in sched_group_cookie_match()
1355 struct sched_group *group)   in sched_group_cookie_match()
1901 struct sched_group {   struct
1902 struct sched_group *next;   /* Must be a circular list */   argument
1921 static inline struct cpumask *sched_group_span(struct sched_group *sg)   in sched_group_span()   argument
1929 static inline struct cpumask *group_balance_mask(struct sched_group *sg)   in group_balance_mask()
1934 extern int group_balance_cpu(struct sched_group *sg);
6995 static struct sched_group *
7002 find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)   in find_idlest_group_cpu()
7074 struct sched_group *group;   in find_idlest_cpu()
9336 struct sched_group *busiest;   /* Busiest group in this sd */
9337 struct sched_group *local;     /* Local group in this sd */
9401 struct sched_group *sdg = sd->groups;   in update_cpu_capacity()
9419 struct sched_group *group, *sdg = sd->groups;   in update_group_capacity()
9524 static inline int sg_imbalanced(struct sched_group *group)   in sg_imbalanced()
9585 struct sched_group *group,   in group_classify()
9651 struct sched_group *group)   in sched_asym()
[all …]
28 每个调度域必须具有一个或多个CPU调度组(struct sched_group),它们以单向循环链表的形式
19 Each scheduling domain must have one or more CPU groups (struct sched_group)