
Searched refs:memcg (Results 1 – 25 of 59) sorted by relevance


/linux-6.6.21/mm/
memcontrol.c
138 struct mem_cgroup *memcg; member
152 int (*register_event)(struct mem_cgroup *memcg,
159 void (*unregister_event)(struct mem_cgroup *memcg,
171 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
172 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
240 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) in memcg_to_vmpressure() argument
242 if (!memcg) in memcg_to_vmpressure()
243 memcg = root_mem_cgroup; in memcg_to_vmpressure()
244 return &memcg->vmpressure; in memcg_to_vmpressure()
324 static void memcg_reparent_objcgs(struct mem_cgroup *memcg, in memcg_reparent_objcgs() argument
[all …]
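
A convention visible in these hits: several memcg helpers accept a NULL memcg and silently substitute root_mem_cgroup, so global-reclaim callers need no special casing; memcg_to_vmpressure() above is one example. A minimal caller-side sketch (hypothetical wrapper, kernel context assumed):

    #include <linux/memcontrol.h>
    #include <linux/vmpressure.h>

    /* Hypothetical helper: a NULL memcg resolves to root_mem_cgroup. */
    static struct vmpressure *example_vmpressure(struct mem_cgroup *memcg)
    {
            return memcg_to_vmpressure(memcg);      /* memcg may be NULL */
    }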
shrinker_debug.c
17 struct mem_cgroup *memcg, in shrinker_count_objects() argument
28 .memcg = memcg, in shrinker_count_objects()
49 struct mem_cgroup *memcg; in shrinker_debugfs_count_show() local
67 memcg = mem_cgroup_iter(NULL, NULL, NULL); in shrinker_debugfs_count_show()
69 if (memcg && !mem_cgroup_online(memcg)) in shrinker_debugfs_count_show()
73 memcg_aware ? memcg : NULL, in shrinker_debugfs_count_show()
76 seq_printf(m, "%lu", mem_cgroup_ino(memcg)); in shrinker_debugfs_count_show()
83 mem_cgroup_iter_break(NULL, memcg); in shrinker_debugfs_count_show()
88 mem_cgroup_iter_break(NULL, memcg); in shrinker_debugfs_count_show()
92 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); in shrinker_debugfs_count_show()
[all …]
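
The loop in shrinker_debugfs_count_show() is the standard memcg tree-walk idiom: mem_cgroup_iter() starts or continues a pre-order walk of the hierarchy, and mem_cgroup_iter_break() drops the iterator's css reference on early exit. A condensed sketch of the idiom, with a hypothetical visit() callback (kernel context assumed):

    #include <linux/memcontrol.h>

    static bool visit(struct mem_cgroup *memcg);    /* hypothetical */

    static void walk_all_memcgs(void)
    {
            struct mem_cgroup *memcg = mem_cgroup_iter(NULL, NULL, NULL);

            do {
                    if (!visit(memcg)) {    /* hypothetical per-memcg work */
                            /* Release the reference the iterator holds. */
                            mem_cgroup_iter_break(NULL, memcg);
                            break;
                    }
            } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
    }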
list_lru.c
69 struct mem_cgroup *memcg = NULL; in list_lru_from_kmem() local
74 memcg = mem_cgroup_from_slab_obj(ptr); in list_lru_from_kmem()
75 if (!memcg) in list_lru_from_kmem()
78 l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg)); in list_lru_from_kmem()
81 *memcg_ptr = memcg; in list_lru_from_kmem()
123 struct mem_cgroup *memcg; in list_lru_add() local
128 l = list_lru_from_kmem(lru, nid, item, &memcg); in list_lru_add()
132 set_shrinker_bit(memcg, nid, in list_lru_add()
179 int nid, struct mem_cgroup *memcg) in list_lru_count_one() argument
185 l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg)); in list_lru_count_one()
[all …]
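
list_lru_add() resolves the per-memcg sublist from the object itself (via mem_cgroup_from_slab_obj()) and sets the memcg's shrinker bit when a list turns non-empty, so callers only hand over the embedded list_head. A sketch of the caller side, assuming a hypothetical cache object and a list_lru initialized with list_lru_init_memcg() (v6.6 API, where nid and memcg are inferred from the item):

    #include <linux/list_lru.h>

    struct my_obj {                         /* hypothetical cache object */
            struct list_head lru;
    };

    static struct list_lru my_lru;          /* assume list_lru_init_memcg() ran */

    static void my_obj_make_reclaimable(struct my_obj *obj)
    {
            /* In v6.6, nid and memcg are derived from the item itself. */
            list_lru_add(&my_lru, &obj->lru);
    }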
vmpressure.c
77 struct mem_cgroup *memcg = vmpressure_to_memcg(vmpr); in vmpressure_parent() local
79 memcg = parent_mem_cgroup(memcg); in vmpressure_parent()
80 if (!memcg) in vmpressure_parent()
82 return memcg_to_vmpressure(memcg); in vmpressure_parent()
239 void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, in vmpressure() argument
255 vmpr = memcg_to_vmpressure(memcg); in vmpressure()
295 if (!memcg || mem_cgroup_is_root(memcg)) in vmpressure()
319 WRITE_ONCE(memcg->socket_pressure, jiffies + HZ); in vmpressure()
335 void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio) in vmpressure_prio() argument
351 vmpressure(gfp, memcg, true, vmpressure_win, 0); in vmpressure_prio()
[all …]
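
vmpressure() consumes scanned/reclaimed deltas; with tree == true the event also propagates upward through vmpressure_parent(). Roughly how the reclaim path feeds it (a sketch of the shrink_node() call site, not the verbatim code; sc is the usual struct scan_control):

    /* Deltas cover only this reclaim pass. */
    vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
               sc->nr_scanned - nr_scanned,
               sc->nr_reclaimed - nr_reclaimed);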
vmscan.c
208 static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg, in shrinker_info_protected() argument
211 return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info, in shrinker_info_protected()
215 static int expand_one_shrinker_info(struct mem_cgroup *memcg, in expand_one_shrinker_info() argument
226 pn = memcg->nodeinfo[nid]; in expand_one_shrinker_info()
227 old = shrinker_info_protected(memcg, nid); in expand_one_shrinker_info()
259 void free_shrinker_info(struct mem_cgroup *memcg) in free_shrinker_info() argument
266 pn = memcg->nodeinfo[nid]; in free_shrinker_info()
273 int alloc_shrinker_info(struct mem_cgroup *memcg) in alloc_shrinker_info() argument
286 free_shrinker_info(memcg); in alloc_shrinker_info()
293 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info); in alloc_shrinker_info()
[all …]
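
alloc_shrinker_info() and free_shrinker_info() bracket a memcg's lifetime, while expand_one_shrinker_info() regrows every memcg's per-node maps when the shrinker ID space outgrows them. A sketch of the pairing (hypothetical wrappers; in v6.6 the real calls sit in the memcg css_online/css_free paths):

    static int my_memcg_online(struct mem_cgroup *memcg)
    {
            return alloc_shrinker_info(memcg);      /* -ENOMEM on failure */
    }

    static void my_memcg_release(struct mem_cgroup *memcg)
    {
            free_shrinker_info(memcg);
    }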
workingset.c
242 struct mem_cgroup *memcg = folio_memcg(folio); in lru_gen_eviction() local
247 lruvec = mem_cgroup_lruvec(memcg, pgdat); in lru_gen_eviction()
255 return pack_shadow(mem_cgroup_id(memcg), pgdat, token, refs); in lru_gen_eviction()
267 struct mem_cgroup *memcg; in lru_gen_test_recent() local
272 memcg = mem_cgroup_from_id(memcg_id); in lru_gen_test_recent()
273 *lruvec = mem_cgroup_lruvec(memcg, pgdat); in lru_gen_test_recent()
512 struct mem_cgroup *memcg; in workingset_refault() local
536 memcg = folio_memcg(folio); in workingset_refault()
538 lruvec = mem_cgroup_lruvec(memcg, pgdat); in workingset_refault()
569 struct mem_cgroup *memcg; in workingset_activation() local
[all …]
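
Eviction packs mem_cgroup_id(memcg) into the shadow entry; on refault the ID is decoded back into a memcg, which may have been offlined in the meantime, hence the lookup by ID rather than by pointer. A sketch of the decode step, paraphrasing lru_gen_test_recent() above (kernel context; RCU protects the ID-to-memcg mapping):

    rcu_read_lock();
    memcg = mem_cgroup_from_id(memcg_id);       /* may return NULL */
    lruvec = mem_cgroup_lruvec(memcg, pgdat);   /* NULL memcg -> root lruvec */
    rcu_read_unlock();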
mmap_lock.c
202 struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm); in get_mm_memcg_path() local
204 if (memcg == NULL) in get_mm_memcg_path()
206 if (unlikely(memcg->css.cgroup == NULL)) in get_mm_memcg_path()
213 cgroup_path(memcg->css.cgroup, buf, MEMCG_PATH_BUF_SIZE); in get_mm_memcg_path()
216 css_put(&memcg->css); in get_mm_memcg_path()
oom_kill.c
73 return oc->memcg != NULL; in is_memcg_oom()
260 oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1; in constrained_alloc()
369 mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc); in select_bad_process()
429 mem_cgroup_scan_tasks(oc->memcg, dump_task, oc); in dump_tasks()
447 mem_cgroup_print_oom_context(oc->memcg, victim); in dump_oom_summary()
462 mem_cgroup_print_oom_meminfo(oc->memcg); in dump_header()
1040 oom_group = mem_cgroup_get_oom_group(victim, oc->memcg); in oom_kill_process()
page_owner.c
366 struct mem_cgroup *memcg; in print_page_owner_memcg() local
379 memcg = page_memcg_check(page); in print_page_owner_memcg()
380 if (!memcg) in print_page_owner_memcg()
383 online = (memcg->css.flags & CSS_ONLINE); in print_page_owner_memcg()
384 cgroup_name(memcg->css.cgroup, name, sizeof(name)); in print_page_owner_memcg()
page_io.c
221 struct mem_cgroup *memcg; in bio_associate_blkg_from_page() local
223 memcg = folio_memcg(folio); in bio_associate_blkg_from_page()
224 if (!memcg) in bio_associate_blkg_from_page()
228 css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys); in bio_associate_blkg_from_page()
/linux-6.6.21/include/linux/
memcontrol.h
140 struct mem_cgroup *memcg; /* Back pointer, we cannot */ member
195 struct mem_cgroup *memcg; member
370 return READ_ONCE(objcg->memcg); in obj_cgroup_memcg()
528 struct mem_cgroup *memcg; in get_mem_cgroup_from_objcg() local
532 memcg = obj_cgroup_memcg(objcg); in get_mem_cgroup_from_objcg()
533 if (unlikely(!css_tryget(&memcg->css))) in get_mem_cgroup_from_objcg()
537 return memcg; in get_mem_cgroup_from_objcg()
570 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) in mem_cgroup_is_root() argument
572 return (memcg == root_mem_cgroup); in mem_cgroup_is_root()
581 struct mem_cgroup *memcg, in mem_cgroup_protection() argument
[all …]
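
obj_cgroup_memcg() returns a pointer that is only stable under RCU, since object cgroups are reparented when their memcg dies; get_mem_cgroup_from_objcg() therefore retries css_tryget() until it pins a still-live memcg. Caller-side sketch (kernel context assumed):

    struct mem_cgroup *memcg = get_mem_cgroup_from_objcg(objcg);

    /* ... safe to dereference memcg here ... */

    css_put(&memcg->css);       /* drop the reference taken above */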
vmpressure.h
33 extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
35 extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
39 extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
41 extern int vmpressure_register_event(struct mem_cgroup *memcg,
44 extern void vmpressure_unregister_event(struct mem_cgroup *memcg,
47 static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, in vmpressure() argument
49 static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, in vmpressure_prio() argument
list_lru.h
70 int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
72 void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent);
116 int nid, struct mem_cgroup *memcg);
122 return list_lru_count_one(lru, sc->nid, sc->memcg); in list_lru_shrink_count()
166 int nid, struct mem_cgroup *memcg,
183 int nid, struct mem_cgroup *memcg,
194 return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg, in list_lru_shrink_walk()
202 return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg, in list_lru_shrink_walk_irq()
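
The _shrink_ wrappers pull nid and memcg out of the shrink_control, so a memcg-aware shrinker backed by a list_lru reduces to two thin callbacks. A sketch assuming a hypothetical my_lru and isolate callback, for a shrinker registered with SHRINKER_MEMCG_AWARE (v6.6 shrinker API):

    #include <linux/shrinker.h>
    #include <linux/list_lru.h>

    static struct list_lru my_lru;          /* hypothetical */

    static enum lru_status my_isolate(struct list_head *item,
                                      struct list_lru_one *list,
                                      spinlock_t *lock, void *cb_arg);

    static unsigned long my_count(struct shrinker *shrink,
                                  struct shrink_control *sc)
    {
            return list_lru_shrink_count(&my_lru, sc);
    }

    static unsigned long my_scan(struct shrinker *shrink,
                                 struct shrink_control *sc)
    {
            return list_lru_shrink_walk(&my_lru, sc, my_isolate, NULL);
    }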
swap.h
409 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
613 static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) in mem_cgroup_swappiness() argument
620 if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg)) in mem_cgroup_swappiness()
623 return READ_ONCE(memcg->swappiness); in mem_cgroup_swappiness()
665 extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
683 static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) in mem_cgroup_get_nr_swap_pages() argument
mmzone.h
554 void lru_gen_init_memcg(struct mem_cgroup *memcg);
555 void lru_gen_exit_memcg(struct mem_cgroup *memcg);
556 void lru_gen_online_memcg(struct mem_cgroup *memcg);
557 void lru_gen_offline_memcg(struct mem_cgroup *memcg);
558 void lru_gen_release_memcg(struct mem_cgroup *memcg);
559 void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid);
590 static inline void lru_gen_init_memcg(struct mem_cgroup *memcg) in lru_gen_init_memcg() argument
594 static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg) in lru_gen_exit_memcg() argument
598 static inline void lru_gen_online_memcg(struct mem_cgroup *memcg) in lru_gen_online_memcg() argument
602 static inline void lru_gen_offline_memcg(struct mem_cgroup *memcg) in lru_gen_offline_memcg() argument
[all …]
/linux-6.6.21/tools/testing/selftests/cgroup/
test_memcontrol.c
170 char *memcg; in test_memcg_current() local
172 memcg = cg_name(root, "memcg_test"); in test_memcg_current()
173 if (!memcg) in test_memcg_current()
176 if (cg_create(memcg)) in test_memcg_current()
179 current = cg_read_long(memcg, "memory.current"); in test_memcg_current()
183 if (cg_run(memcg, alloc_anon_50M_check, NULL)) in test_memcg_current()
186 if (cg_run(memcg, alloc_pagecache_50M_check, NULL)) in test_memcg_current()
192 cg_destroy(memcg); in test_memcg_current()
193 free(memcg); in test_memcg_current()
251 static bool reclaim_until(const char *memcg, long goal);
[all …]
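
Every case in this selftest follows the same scaffolding: derive a child cgroup name with cg_name(), create it, drive workloads in it with cg_run(), read counters with cg_read_long(), then destroy and free. A condensed sketch of that shape (helpers come from the selftest's cgroup_util.h; error handling trimmed):

    static int test_sketch(const char *root)
    {
            int ret = KSFT_FAIL;
            char *memcg = cg_name(root, "memcg_test");  /* child path */

            if (!memcg || cg_create(memcg))
                    goto cleanup;
            if (cg_run(memcg, alloc_anon_50M_check, NULL))
                    goto cleanup;
            if (cg_read_long(memcg, "memory.current") > 0)
                    ret = KSFT_PASS;
    cleanup:
            if (memcg)
                    cg_destroy(memcg);
            free(memcg);
            return ret;
    }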
memcg_protection.m
5 % This script simulates reclaim protection behavior on a single level of memcg
65 % commit 1bc63fb1272b ("mm, memcg: make scan aggression always exclude protection")
/linux-6.6.21/tools/cgroup/
memcg_slabinfo.py
42 memcg = container_of(css, 'struct mem_cgroup', 'css')
43 MEMCGS[css.cgroup.kn.id.value_()] = memcg
171 memcg = MEMCGS[cgroup_id]
187 obj_cgroups.add(memcg.objcg.value_())
189 memcg.objcg_list.address_of_(),
221 memcg.kmem_caches.address_of_(),
/linux-6.6.21/Documentation/admin-guide/cgroup-v1/
memcg_test.rst
9 Because the VM is getting complex (one of the reasons is memcg...), memcg's behavior
10 is complex. This document describes memcg's internal behavior.
61 At commit(), the page is associated with the memcg.
114 But a brief explanation of the behavior of memcg around shmem will be
136 Each memcg has its own vector of LRUs (inactive anon, active anon,
138 each LRU handled under a single lru_lock for that memcg and node.
145 9.1 Small limit to memcg.
148 When you test racy cases, it is a good idea to set memcg's limit
158 Historically, memcg's shmem handling was poor and we saw some amount
248 Besides, management of swap is one of the complicated parts of memcg,
[all …]
/linux-6.6.21/include/linux/sched/
mm.h
410 set_active_memcg(struct mem_cgroup *memcg) in set_active_memcg() argument
416 this_cpu_write(int_active_memcg, memcg); in set_active_memcg()
419 current->active_memcg = memcg; in set_active_memcg()
426 set_active_memcg(struct mem_cgroup *memcg) in set_active_memcg() argument
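
set_active_memcg() returns the previous override so calls can nest; the canonical pattern saves and restores it around accounted allocations, exactly as the bpf/memalloc.c hits below show. A minimal fragment (kernel context; GFP_KERNEL_ACCOUNT charges the allocation to the active memcg):

    struct mem_cgroup *old_memcg;

    old_memcg = set_active_memcg(memcg);        /* redirect charging */
    ptr = kmalloc(size, GFP_KERNEL_ACCOUNT);    /* charged to memcg */
    set_active_memcg(old_memcg);                /* restore previous override */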
/linux-6.6.21/Documentation/translations/zh_CN/mm/
hwpoison.rst
119 corrupt-filter-memcg
120 Limit the injection to pages owned by a memgroup, specified by the inode number of the memcg.
130 echo $memcg_ino > /debug/hwpoison/corrupt-filter-memcg
/linux-6.6.21/mm/damon/
paddr.c
192 struct mem_cgroup *memcg; in __damos_pa_filter_out() local
200 memcg = folio_memcg_check(folio); in __damos_pa_filter_out()
201 if (!memcg) in __damos_pa_filter_out()
204 matched = filter->memcg_id == mem_cgroup_id(memcg); in __damos_pa_filter_out()
/linux-6.6.21/kernel/bpf/
memalloc.c
206 struct mem_cgroup *memcg = NULL, *old_memcg; in alloc_bulk() local
237 memcg = get_memcg(c); in alloc_bulk()
238 old_memcg = set_active_memcg(memcg); in alloc_bulk()
251 mem_cgroup_put(memcg); in alloc_bulk()
920 struct mem_cgroup *memcg, *old_memcg; in bpf_mem_cache_alloc_flags() local
922 memcg = get_memcg(c); in bpf_mem_cache_alloc_flags()
923 old_memcg = set_active_memcg(memcg); in bpf_mem_cache_alloc_flags()
928 mem_cgroup_put(memcg); in bpf_mem_cache_alloc_flags()
/linux-6.6.21/Documentation/admin-guide/mm/
shrinker_debugfs.rst
14 trigger *count_objects()* and *scan_objects()* callbacks for each memcg and
59 If the shrinker is not memcg-aware or CONFIG_MEMCG is off, 0 is printed
112 For a non-memcg-aware shrinker or on a system with no memory
/linux-6.6.21/Documentation/mm/
multigen_lru.rst
162 An ``mm_struct`` list is maintained for each memcg, and an
163 ``mm_struct`` follows its owner task to the new memcg when this task
173 ``mm_struct`` was migrated, pages left in the previous memcg will be
174 ignored when the current memcg is under reclaim. Similarly, page table
225 A memcg LRU is a per-node LRU of memcgs. It is also an LRU of LRUs,
226 since each node and memcg combination has an LRU of folios (see
229 data centers. Note that memcg LRU only applies to global reclaim.
231 The basic structure of a memcg LRU can be understood by an analogy to
238 3. Other events trigger similar operations, e.g., offlining a memcg
243 1. Sharding, which allows each thread to start at a random memcg (in
