/linux-6.6.21/mm/ |
D | swap.c |
      84  struct lruvec *lruvec;   in __page_cache_release() local
      87  lruvec = folio_lruvec_lock_irqsave(folio, &flags);   in __page_cache_release()
      88  lruvec_del_folio(lruvec, folio);   in __page_cache_release()
      90  unlock_page_lruvec_irqrestore(lruvec, flags);   in __page_cache_release()
     161  typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);
     163  static void lru_add_fn(struct lruvec *lruvec, struct folio *folio)   in lru_add_fn() argument
     199  lruvec_add_folio(lruvec, folio);   in lru_add_fn()
     206  struct lruvec *lruvec = NULL;   in folio_batch_move_lru() local
     216  lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);   in folio_batch_move_lru()
     217  move_fn(lruvec, folio);   in folio_batch_move_lru()
          [all …]
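The swap.c hits outline the basic lruvec locking discipline: look up and lock the folio's lruvec, manipulate the LRU list, then unlock; the batched path caches the locked lruvec and only switches locks when the next folio belongs to a different one. A condensed sketch of that pattern, assuming kernel-internal mm/ context (release_one_folio() and batch_move_lru() are illustrative stand-ins for __page_cache_release() and folio_batch_move_lru(); reference and flag checks are omitted):

    /* Sketch only: mirrors the pattern above, not the verbatim kernel code. */
    static void release_one_folio(struct folio *folio)
    {
            struct lruvec *lruvec;
            unsigned long flags;

            lruvec = folio_lruvec_lock_irqsave(folio, &flags);  /* lock the folio's lruvec */
            lruvec_del_folio(lruvec, folio);                    /* unlink from its LRU list */
            unlock_page_lruvec_irqrestore(lruvec, flags);
    }

    static void batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
    {
            struct lruvec *lruvec = NULL;   /* lazily locked, cached across folios */
            unsigned long flags;
            unsigned int i;

            for (i = 0; i < folio_batch_count(fbatch); i++) {
                    struct folio *folio = fbatch->folios[i];

                    /* re-lock only if this folio lives on a different lruvec */
                    lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
                    move_fn(lruvec, folio);
            }
            if (lruvec)
                    unlock_page_lruvec_irqrestore(lruvec, flags);
    }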
|
D | workingset.c |
     236  struct lruvec *lruvec;   in lru_gen_eviction() local
     247  lruvec = mem_cgroup_lruvec(memcg, pgdat);   in lru_gen_eviction()
     248  lrugen = &lruvec->lrugen;   in lru_gen_eviction()
     262  static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec,   in lru_gen_test_recent() argument
     273  *lruvec = mem_cgroup_lruvec(memcg, pgdat);   in lru_gen_test_recent()
     275  min_seq = READ_ONCE((*lruvec)->lrugen.min_seq[file]);   in lru_gen_test_recent()
     285  struct lruvec *lruvec;   in lru_gen_refault() local
     292  recent = lru_gen_test_recent(shadow, type, &lruvec, &token, &workingset);   in lru_gen_refault()
     293  if (lruvec != folio_lruvec(folio))   in lru_gen_refault()
     296  mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + type, delta);   in lru_gen_refault()
          [all …]
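For the multi-gen LRU, the workingset.c hits show how a shadow entry left behind at eviction is resolved back to an lruvec on refault and compared against that lruvec's min_seq. A much-simplified sketch of the idea, assuming CONFIG_LRU_GEN (the lrugen member only exists then); refault_sketch() is a hypothetical name and its recency test is a placeholder for the real generation-bit unpacking:

    static void refault_sketch(struct folio *folio, struct mem_cgroup *memcg,
                               struct pglist_data *pgdat, bool file,
                               unsigned long token)
    {
            struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
            unsigned long min_seq = READ_ONCE(lruvec->lrugen.min_seq[file]);
            long delta = folio_nr_pages(folio);

            /* placeholder recency test; the real code compares packed generation bits */
            if (token < min_seq)
                    return;         /* evicted too many generations ago */

            /* charge the refault to the lruvec the folio was evicted from */
            mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, delta);
    }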
|
D | vmscan.c |
     654  static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,   in lruvec_lru_size() argument
     661  struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];   in lruvec_lru_size()
     667  size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);   in lruvec_lru_size()
    2250  static __always_inline void update_lru_sizes(struct lruvec *lruvec,   in update_lru_sizes() argument
    2259  update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);   in update_lru_sizes()
    2305  struct lruvec *lruvec, struct list_head *dst,   in isolate_lru_folios() argument
    2309  struct list_head *src = &lruvec->lists[lru];   in isolate_lru_folios()
    2394  update_lru_sizes(lruvec, lru, nr_zone_taken);   in isolate_lru_folios()
    2428  struct lruvec *lruvec;   in folio_isolate_lru() local
    2431  lruvec = folio_lruvec_lock_irq(folio);   in folio_isolate_lru()
          [all …]
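The vmscan.c hits centre on sizing and isolating LRU lists. The lruvec_lru_size() lines suggest a per-zone accumulation; a simplified sketch of that shape (lru_size_sketch() is a stand-in name; the !memcg fallback and additional checks are omitted):

    static unsigned long lru_size_sketch(struct lruvec *lruvec, enum lru_list lru,
                                         int zone_idx)
    {
            unsigned long size = 0;
            int zid;

            /* sum the LRU's pages over all zones up to and including zone_idx */
            for (zid = 0; zid <= zone_idx; zid++) {
                    struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];

                    if (!managed_zone(zone))        /* skip zones with no managed pages */
                            continue;
                    size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
            }
            return size;
    }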
|
D | mlock.c |
      61  static struct lruvec *__mlock_folio(struct folio *folio, struct lruvec *lruvec)   in __mlock_folio() argument
      65  return lruvec;   in __mlock_folio()
      67  lruvec = folio_lruvec_relock_irq(folio, lruvec);   in __mlock_folio()
      76  lruvec_del_folio(lruvec, folio);   in __mlock_folio()
      78  lruvec_add_folio(lruvec, folio);   in __mlock_folio()
      92  lruvec_del_folio(lruvec, folio);   in __mlock_folio()
      96  lruvec_add_folio(lruvec, folio);   in __mlock_folio()
     100  return lruvec;   in __mlock_folio()
     103  static struct lruvec *__mlock_new_folio(struct folio *folio, struct lruvec *lruvec)   in __mlock_new_folio() argument
     107  lruvec = folio_lruvec_relock_irq(folio, lruvec);   in __mlock_new_folio()
          [all …]
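The mlock.c hits show the "relock and hand the lruvec back" idiom used by batch processors: the caller passes in whatever lruvec it currently has locked, the helper switches locks if the folio lives elsewhere, requeues the folio, and returns whichever lruvec is now held so it can be reused for the next folio. A sketch under those assumptions (mlock_move_sketch() is a hypothetical name; the mlock/unevictable state checks that pick the target list are elided):

    static struct lruvec *mlock_move_sketch(struct folio *folio, struct lruvec *lruvec)
    {
            if (!folio_test_clear_lru(folio))       /* not on an LRU list: nothing to move */
                    return lruvec;

            /* may drop the old lock and take the lock of this folio's lruvec */
            lruvec = folio_lruvec_relock_irq(folio, lruvec);

            lruvec_del_folio(lruvec, folio);        /* take it off its current list */
            /* ... decide the target list from the folio's mlock state ... */
            lruvec_add_folio(lruvec, folio);        /* requeue it */

            folio_set_lru(folio);
            return lruvec;                          /* still locked; caller unlocks later */
    }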
|
D | mmzone.c |
      75  void lruvec_init(struct lruvec *lruvec)   in lruvec_init() argument
      79  memset(lruvec, 0, sizeof(struct lruvec));   in lruvec_init()
      80  spin_lock_init(&lruvec->lru_lock);   in lruvec_init()
      83  INIT_LIST_HEAD(&lruvec->lists[lru]);   in lruvec_init()
      90  list_del(&lruvec->lists[LRU_UNEVICTABLE]);   in lruvec_init()
      92  lru_gen_init_lruvec(lruvec);   in lruvec_init()
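The matched lines cover nearly all of lruvec_init(); a reconstruction with the intermediate lines filled in from context (the loop construct and the local variable declaration are inferred, not quoted):

    void lruvec_init(struct lruvec *lruvec)
    {
            enum lru_list lru;

            memset(lruvec, 0, sizeof(struct lruvec));
            spin_lock_init(&lruvec->lru_lock);

            for_each_lru(lru)
                    INIT_LIST_HEAD(&lruvec->lists[lru]);

            /*
             * The unevictable list head is deliberately poisoned: unevictable
             * folios are never walked through lruvec->lists[LRU_UNEVICTABLE].
             */
            list_del(&lruvec->lists[LRU_UNEVICTABLE]);

            lru_gen_init_lruvec(lruvec);
    }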
|
D | memcontrol.c |
     791  void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,   in __mod_memcg_lruvec_state() argument
     797  pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);   in __mod_memcg_lruvec_state()
     841  void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,   in __mod_lruvec_state() argument
     845  __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);   in __mod_lruvec_state()
     849  __mod_memcg_lruvec_state(lruvec, idx, val);   in __mod_lruvec_state()
     858  struct lruvec *lruvec;   in __mod_lruvec_page_state() local
     869  lruvec = mem_cgroup_lruvec(memcg, pgdat);   in __mod_lruvec_page_state()
     870  __mod_lruvec_state(lruvec, idx, val);   in __mod_lruvec_page_state()
     879  struct lruvec *lruvec;   in __mod_lruvec_kmem_state() local
     893  lruvec = mem_cgroup_lruvec(memcg, pgdat);   in __mod_lruvec_kmem_state()
          [all …]
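The memcontrol.c hits show the layering of lruvec statistics: every update goes to the per-node vmstat counter, and additionally to the owning memcg's counter when cgroups are active (the lruvec is mapped back to its mem_cgroup_per_node via container_of()). A sketch of __mod_lruvec_state() reconstructed from the lines above; the memcg-disabled guard is an assumption consistent with those lines:

    void mod_lruvec_state_sketch(struct lruvec *lruvec, enum node_stat_item idx,
                                 int val)
    {
            /* node-level counter: always updated */
            __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

            /* memcg-level counter: only when memory cgroups are in use */
            if (!mem_cgroup_disabled())
                    __mod_memcg_lruvec_state(lruvec, idx, val);
    }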
|
D | compaction.c |
     843  struct lruvec *lruvec;   in isolate_migratepages_block() local
     845  struct lruvec *locked = NULL;   in isolate_migratepages_block()
    1123  lruvec = folio_lruvec(folio);   in isolate_migratepages_block()
    1126  if (lruvec != locked) {   in isolate_migratepages_block()
    1130  compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);   in isolate_migratepages_block()
    1131  locked = lruvec;   in isolate_migratepages_block()
    1133  lruvec_memcg_debug(lruvec, folio);   in isolate_migratepages_block()
    1166  lruvec_del_folio(lruvec, folio);   in isolate_migratepages_block()
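Compaction's migrate scanner keeps the last locked lruvec cached in "locked" and only drops and reacquires lru_lock when the next folio belongs to a different lruvec, which keeps lock traffic low while walking a pageblock. A sketch of that step (scan_lock_folio() is a hypothetical helper; compact_lock_irqsave() and struct compact_control are compaction-internal, and the contention/abort handling is omitted):

    static struct lruvec *scan_lock_folio(struct folio *folio, struct lruvec *locked,
                                          unsigned long *flags,
                                          struct compact_control *cc)
    {
            struct lruvec *lruvec = folio_lruvec(folio);

            if (lruvec != locked) {
                    if (locked)                     /* release the previously cached lock */
                            unlock_page_lruvec_irqrestore(locked, *flags);

                    compact_lock_irqsave(&lruvec->lru_lock, flags, cc);
                    locked = lruvec;

                    lruvec_memcg_debug(lruvec, folio);
            }

            lruvec_del_folio(lruvec, folio);        /* isolate under the held lru_lock */
            return locked;
    }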
|
D | huge_memory.c |
    2382  struct lruvec *lruvec, struct list_head *list)   in lru_add_page_tail() argument
    2387  lockdep_assert_held(&lruvec->lru_lock);   in lru_add_page_tail()
    2406  struct lruvec *lruvec, struct list_head *list)   in __split_huge_page_tail() argument
    2493  lru_add_page_tail(head, page_tail, lruvec, list);   in __split_huge_page_tail()
    2501  struct lruvec *lruvec;   in __split_huge_page() local
    2517  lruvec = folio_lruvec_lock(folio);   in __split_huge_page()
    2522  __split_huge_page_tail(folio, i, lruvec, list);   in __split_huge_page()
    2544  unlock_page_lruvec(lruvec);   in __split_huge_page()
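When a huge page is split, the lruvec lock is taken once for the head folio and held while every tail page is queued (lru_add_page_tail() asserts this with lockdep), then dropped after the last tail. A heavily simplified sketch of that shape, assuming huge_memory.c-internal context since __split_huge_page_tail() is file-local; split_tails_sketch() is a hypothetical wrapper:

    static void split_tails_sketch(struct folio *folio, struct list_head *list, int nr)
    {
            struct lruvec *lruvec;
            int i;

            lruvec = folio_lruvec_lock(folio);      /* one lock for the whole split */

            for (i = nr - 1; i >= 1; i--)
                    __split_huge_page_tail(folio, i, lruvec, list);

            unlock_page_lruvec(lruvec);
    }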
|
D | migrate.c | 486 struct lruvec *old_lruvec, *new_lruvec; in folio_migrate_mapping()
|
/linux-6.6.21/include/linux/ |
D | mm_inline.h |
      37  static __always_inline void __update_lru_size(struct lruvec *lruvec,   in __update_lru_size() argument
      41  struct pglist_data *pgdat = lruvec_pgdat(lruvec);   in __update_lru_size()
      43  lockdep_assert_held(&lruvec->lru_lock);   in __update_lru_size()
      46  __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);   in __update_lru_size()
      51  static __always_inline void update_lru_size(struct lruvec *lruvec,   in update_lru_size() argument
      55  __update_lru_size(lruvec, lru, zid, nr_pages);   in update_lru_size()
      57  mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);   in update_lru_size()
     164  static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)   in lru_gen_is_active() argument
     166  unsigned long max_seq = lruvec->lrugen.max_seq;   in lru_gen_is_active()
     174  static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,   in lru_gen_update_size() argument
          [all …]
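mm_inline.h keeps LRU size accounting in two layers: __update_lru_size() adjusts the node-level NR_LRU_BASE counter under lru_lock (plus per-zone counters, not shown in the matches), and update_lru_size() additionally notifies the memcg side. A combined sketch under those assumptions (update_lru_size_sketch() is a stand-in; the per-zone update and the exact memcg guard are inferred):

    static __always_inline void update_lru_size_sketch(struct lruvec *lruvec,
                                                       enum lru_list lru,
                                                       enum zone_type zid,
                                                       long nr_pages)
    {
            lockdep_assert_held(&lruvec->lru_lock);

            /* node-level accounting (per-zone counters elided in this sketch) */
            __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);

            /* memcg-level accounting, only when memory cgroups are active */
            if (!mem_cgroup_disabled())
                    mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
    }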
|
D | memcontrol.h |
     125  struct lruvec lruvec;   member
     727  static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,   in mem_cgroup_lruvec()
     731  struct lruvec *lruvec;   in mem_cgroup_lruvec() local
     734  lruvec = &pgdat->__lruvec;   in mem_cgroup_lruvec()
     742  lruvec = &mz->lruvec;   in mem_cgroup_lruvec()
     749  if (unlikely(lruvec->pgdat != pgdat))   in mem_cgroup_lruvec()
     750  lruvec->pgdat = pgdat;   in mem_cgroup_lruvec()
     751  return lruvec;   in mem_cgroup_lruvec()
     760  static inline struct lruvec *folio_lruvec(struct folio *folio)   in folio_lruvec()
     772  struct lruvec *folio_lruvec_lock(struct folio *folio);
          [all …]
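mem_cgroup_lruvec() is the central lookup: with cgroups disabled (or no memcg) it hands back the node's embedded __lruvec, otherwise the per-memcg, per-node lruvec, fixing up the lruvec's pgdat back-pointer lazily. A sketch consistent with the matched lines, assuming CONFIG_MEMCG (the pgdat back-pointer only exists then); memcg_lruvec_sketch() is a stand-in name and the mz lookup via memcg->nodeinfo[] is filled in from context:

    static inline struct lruvec *memcg_lruvec_sketch(struct mem_cgroup *memcg,
                                                     struct pglist_data *pgdat)
    {
            struct mem_cgroup_per_node *mz;
            struct lruvec *lruvec;

            if (mem_cgroup_disabled() || !memcg) {
                    lruvec = &pgdat->__lruvec;      /* node-wide lruvec, no memcg */
                    goto out;
            }

            mz = memcg->nodeinfo[pgdat->node_id];   /* per-memcg, per-node state */
            lruvec = &mz->lruvec;
    out:
            /* keep the back-pointer consistent so lruvec_pgdat() works */
            if (unlikely(lruvec->pgdat != pgdat))
                    lruvec->pgdat = pgdat;
            return lruvec;
    }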
|
D | vmstat.h |
     546  void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
     549  static inline void mod_lruvec_state(struct lruvec *lruvec,   in mod_lruvec_state() argument
     555  __mod_lruvec_state(lruvec, idx, val);   in mod_lruvec_state()
     574  static inline void __mod_lruvec_state(struct lruvec *lruvec,   in __mod_lruvec_state() argument
     577  __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);   in __mod_lruvec_state()
     580  static inline void mod_lruvec_state(struct lruvec *lruvec,   in mod_lruvec_state() argument
     583  mod_node_page_state(lruvec_pgdat(lruvec), idx, val);   in mod_lruvec_state()
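vmstat.h carries two variants of these helpers: with memory cgroups the lruvec versions are real functions (declared around line 546, defined in memcontrol.c); without them they collapse into plain per-node counters. A reconstruction of the fallback branch from the matched lines (treating the CONFIG_MEMCG guard as an assumption about which config option selects the variants):

    #ifndef CONFIG_MEMCG
    static inline void __mod_lruvec_state(struct lruvec *lruvec,
                                          enum node_stat_item idx, int val)
    {
            /* no memcg: an lruvec stat is just the node stat */
            __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
    }

    static inline void mod_lruvec_state(struct lruvec *lruvec,
                                        enum node_stat_item idx, int val)
    {
            mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
    }
    #endif /* !CONFIG_MEMCG */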
|
D | mmzone.h |
     373  struct lruvec;
     476  struct lruvec *lruvec;   member
     491  void lru_gen_init_lruvec(struct lruvec *lruvec);
     580  static inline void lru_gen_init_lruvec(struct lruvec *lruvec)   in lru_gen_init_lruvec() argument
     618  struct lruvec {   struct
    1383  struct lruvec __lruvec;
    1443  extern void lruvec_init(struct lruvec *lruvec);
    1445  static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)   in lruvec_pgdat() argument
    1448  return lruvec->pgdat;   in lruvec_pgdat()
    1450  return container_of(lruvec, struct pglist_data, __lruvec);   in lruvec_pgdat()
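mmzone.h defines struct lruvec itself (line 618), embeds one per node as pglist_data.__lruvec (line 1383), and maps an lruvec back to its node. The two return statements at lines 1448 and 1450 are the two config branches of lruvec_pgdat(); put back together they read roughly:

    static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
    {
    #ifdef CONFIG_MEMCG
            /* a memcg lruvec can live outside pglist_data, so follow its back-pointer */
            return lruvec->pgdat;
    #else
            /* without memcg the only lruvec is the one embedded in pglist_data */
            return container_of(lruvec, struct pglist_data, __lruvec);
    #endif
    }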
|
D | swap.h |
     347  void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
     370  void lru_note_cost(struct lruvec *lruvec, bool file,
|
/linux-6.6.21/Documentation/mm/ |
D | multigen_lru.rst |
      83  ``lruvec``. The youngest generation number is stored in
     114  The aging produces young generations. Given an ``lruvec``, it
     130  The eviction consumes old generations. Given an ``lruvec``, it
     148  set, an ``lruvec`` is protected from the eviction when its oldest
|
D | physical_memory.rst | 267 Per-node lruvec holding LRU lists and related parameters. Used only when
|
/linux-6.6.21/Documentation/trace/ |
D | events-kmem.rst | 72 contention on the lruvec->lru_lock.
|
/linux-6.6.21/Documentation/admin-guide/cgroup-v1/ |
D | memory.rst |
     307  lruvec->lru_lock.
     310  lruvec->lru_lock; PG_lru bit of page->flags is cleared before
     311  isolating a page from its LRU under lruvec->lru_lock.
|