Lines matching refs:mp (uses of the struct xfs_mount pointer mp; the functions below are from fs/xfs/xfs_icache.c)

47 static int xfs_icwalk(struct xfs_mount *mp,
72 struct xfs_mount *mp, in xfs_inode_alloc() argument
81 ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL); in xfs_inode_alloc()
83 if (inode_init_always(mp->m_super, VFS_I(ip))) { in xfs_inode_alloc()
93 XFS_STATS_INC(mp, vn_active); in xfs_inode_alloc()
99 ip->i_mount = mp; in xfs_inode_alloc()
107 ip->i_diflags2 = mp->m_ino_geo.new_diflags2; in xfs_inode_alloc()
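
The xfs_inode_alloc hits above show the allocate-then-initialize shape: the inode comes from a dedicated slab with GFP_KERNEL | __GFP_NOFAIL, and if the generic VFS initialization fails the object goes straight back to the allocator. A minimal userspace sketch of that shape — fake_inode, fake_init_always and friends are invented stand-ins, not XFS code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_inode { unsigned long ino; int vfs_ready; };

/* Models inode_init_always(): may fail, leaving the object unusable. */
static int fake_init_always(struct fake_inode *ip)
{
	memset(ip, 0, sizeof(*ip));
	ip->vfs_ready = 1;
	return 0;		/* 0 on success, negative on error */
}

static struct fake_inode *fake_inode_alloc(unsigned long ino)
{
	/* stands in for alloc_inode_sb(..., GFP_KERNEL | __GFP_NOFAIL) */
	struct fake_inode *ip = malloc(sizeof(*ip));

	if (!ip)
		return NULL;
	if (fake_init_always(ip)) {	/* init failed: release and bail */
		free(ip);
		return NULL;
	}
	ip->ino = ino;		/* fs-specific fields set after VFS init */
	return ip;
}

int main(void)
{
	struct fake_inode *ip = fake_inode_alloc(42);

	printf("ino=%lu ready=%d\n", ip->ino, ip->vfs_ready);
	free(ip);
	return 0;
}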
190 struct xfs_mount *mp) in xfs_reclaim_work_queue() argument
194 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) { in xfs_reclaim_work_queue()
195 queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work, in xfs_reclaim_work_queue()
209 struct xfs_mount *mp = pag->pag_mount; in xfs_blockgc_queue() local
211 if (!xfs_is_blockgc_enabled(mp)) in xfs_blockgc_queue()
229 struct xfs_mount *mp = pag->pag_mount; in xfs_perag_set_inode_tag() local
244 spin_lock(&mp->m_perag_lock); in xfs_perag_set_inode_tag()
245 radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag); in xfs_perag_set_inode_tag()
246 spin_unlock(&mp->m_perag_lock); in xfs_perag_set_inode_tag()
251 xfs_reclaim_work_queue(mp); in xfs_perag_set_inode_tag()
268 struct xfs_mount *mp = pag->pag_mount; in xfs_perag_clear_inode_tag() local
288 spin_lock(&mp->m_perag_lock); in xfs_perag_clear_inode_tag()
289 radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag); in xfs_perag_clear_inode_tag()
290 spin_unlock(&mp->m_perag_lock); in xfs_perag_clear_inode_tag()
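
The xfs_perag_set_inode_tag/xfs_perag_clear_inode_tag hits show a two-level tag scheme: the first tagged inode in an AG propagates the tag up to the per-mount perag tree under m_perag_lock, and clearing the last one removes it, so walkers can skip idle AGs entirely. A minimal userspace sketch of that propagation, with invented names and a bitmask standing in for the radix-tree tags:

#include <stdio.h>

#define NR_AGS 4

static int per_ag_tagged[NR_AGS];	/* per-AG count of tagged inodes */
static unsigned int mount_tag_mask;	/* models the perag tree tags    */

static void set_inode_tag(int agno)
{
	if (per_ag_tagged[agno]++ == 0)		/* first tag in this AG */
		mount_tag_mask |= 1u << agno;	/* propagate upward     */
}

static void clear_inode_tag(int agno)
{
	if (--per_ag_tagged[agno] == 0)		/* last tag gone */
		mount_tag_mask &= ~(1u << agno);
}

int main(void)
{
	set_inode_tag(2);
	set_inode_tag(2);
	clear_inode_tag(2);
	printf("mask after one clear: %#x\n", mount_tag_mask);	/* 0x4 */
	clear_inode_tag(2);
	printf("mask after both:      %#x\n", mount_tag_mask);	/* 0 */
	return 0;
}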
305 struct xfs_mount *mp, in xfs_reinit_inode() argument
317 error = inode_init_always(mp->m_super, inode); in xfs_reinit_inode()
339 struct xfs_mount *mp = ip->i_mount; in xfs_iget_recycle() local
360 error = xfs_reinit_inode(mp, inode); in xfs_iget_recycle()
388 xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), in xfs_iget_recycle()
440 struct xfs_mount *mp) in xfs_inodegc_queue_all() argument
446 for_each_cpu(cpu, &mp->m_inodegc_cpumask) { in xfs_inodegc_queue_all()
447 gc = per_cpu_ptr(mp->m_inodegc, cpu); in xfs_inodegc_queue_all()
449 mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0); in xfs_inodegc_queue_all()
460 struct xfs_mount *mp) in xfs_inodegc_wait_all() argument
465 flush_workqueue(mp->m_inodegc_wq); in xfs_inodegc_wait_all()
466 for_each_cpu(cpu, &mp->m_inodegc_cpumask) { in xfs_inodegc_wait_all()
469 gc = per_cpu_ptr(mp->m_inodegc, cpu); in xfs_inodegc_wait_all()
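
xfs_inodegc_queue_all and xfs_inodegc_wait_all iterate only the CPUs recorded in m_inodegc_cpumask, arming or draining each CPU's delayed work. A rough userspace model of that per-cpu fan-out, with an array and a flag vector standing in for per-cpu data and the cpumask (all names invented):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct gc_queue { int nr_items; bool work_armed; };

static struct gc_queue per_cpu_gc[NR_CPUS];
static bool cpu_has_items[NR_CPUS];	/* models mp->m_inodegc_cpumask */

/* Kick the worker on every CPU that has queued inodes; returns whether
 * anything was armed, like xfs_inodegc_queue_all(). */
static bool inodegc_queue_all(void)
{
	bool ret = false;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!cpu_has_items[cpu])	/* for_each_cpu(cpu, mask) */
			continue;
		per_cpu_gc[cpu].work_armed = true;  /* mod_delayed_work_on */
		ret = true;
	}
	return ret;
}

int main(void)
{
	bool armed;

	cpu_has_items[1] = true;
	per_cpu_gc[1].nr_items = 3;
	armed = inodegc_queue_all();
	printf("armed=%d, cpu1 work_armed=%d\n", armed,
	       per_cpu_gc[1].work_armed);
	return 0;
}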
490 struct xfs_mount *mp = ip->i_mount; in xfs_iget_cache_hit() local
571 XFS_STATS_INC(mp, xs_ig_found); in xfs_iget_cache_hit()
577 XFS_STATS_INC(mp, xs_ig_frecycle); in xfs_iget_cache_hit()
591 if (xfs_is_inodegc_enabled(mp)) in xfs_iget_cache_hit()
592 xfs_inodegc_queue_all(mp); in xfs_iget_cache_hit()
598 struct xfs_mount *mp, in xfs_iget_cache_miss() argument
608 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino); in xfs_iget_cache_miss()
611 ip = xfs_inode_alloc(mp, ino); in xfs_iget_cache_miss()
629 if (xfs_has_v3inodes(mp) && in xfs_iget_cache_miss()
630 (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) { in xfs_iget_cache_miss()
635 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp); in xfs_iget_cache_miss()
701 XFS_STATS_INC(mp, xs_ig_dup); in xfs_iget_cache_miss()
736 struct xfs_mount *mp, in xfs_iget() argument
751 if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount) in xfs_iget()
754 XFS_STATS_INC(mp, xs_ig_attempts); in xfs_iget()
757 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino)); in xfs_iget()
758 agino = XFS_INO_TO_AGINO(mp, ino); in xfs_iget()
775 XFS_STATS_INC(mp, xs_ig_missed); in xfs_iget()
777 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, in xfs_iget()
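
The xfs_iget hits show the lookup splitting an inode number into an AG number (XFS_INO_TO_AGNO) and an AG-relative inode number (XFS_INO_TO_AGINO), rejecting ino 0 and out-of-range AGs up front. A worked example under an assumed geometry — the real bit split comes from the superblock, so a fixed 32-bit boundary is purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* Illustrative geometry: fs-wide ino = (agno << AGINO_BITS) | agino. */
#define AGINO_BITS	32
#define AG_COUNT	4

static uint32_t ino_to_agno(uint64_t ino)  { return ino >> AGINO_BITS; }
static uint32_t ino_to_agino(uint64_t ino) { return (uint32_t)ino; }

int main(void)
{
	uint64_t ino = ((uint64_t)2 << AGINO_BITS) | 128;  /* AG 2, agino 128 */

	/* the xfs_iget()-style sanity check: reject ino 0 and bad AGs */
	if (!ino || ino_to_agno(ino) >= AG_COUNT)
		return 1;
	printf("ino %#llx -> agno %u agino %u\n",
	       (unsigned long long)ino, ino_to_agno(ino), ino_to_agino(ino));
	return 0;
}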
956 struct xfs_mount *mp) in xfs_want_reclaim_sick() argument
958 return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) || in xfs_want_reclaim_sick()
959 xfs_is_shutdown(mp); in xfs_want_reclaim_sick()
964 struct xfs_mount *mp) in xfs_reclaim_inodes() argument
970 if (xfs_want_reclaim_sick(mp)) in xfs_reclaim_inodes()
973 while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) { in xfs_reclaim_inodes()
974 xfs_ail_push_all_sync(mp->m_ail); in xfs_reclaim_inodes()
975 xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw); in xfs_reclaim_inodes()
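
xfs_reclaim_inodes loops while any AG still carries XFS_ICI_RECLAIM_TAG: push the AIL synchronously so dirty inodes can become reclaimable, then sweep with xfs_icwalk. A toy model of that push-then-sweep loop (the counters are invented):

#include <stdio.h>

static int dirty_tagged = 3;	/* AGs still carrying the reclaim tag */

static void ail_push_all_sync(void) { /* write back dirty metadata */ }

static void icwalk_reclaim(void)	/* one sweep frees some inodes */
{
	if (dirty_tagged)
		dirty_tagged--;
}

int main(void)
{
	int passes = 0;

	/* loop until no per-AG reclaim tags remain */
	while (dirty_tagged) {
		ail_push_all_sync();
		icwalk_reclaim();
		passes++;
	}
	printf("clean after %d passes\n", passes);
	return 0;
}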
988 struct xfs_mount *mp, in xfs_reclaim_inodes_nr() argument
996 if (xfs_want_reclaim_sick(mp)) in xfs_reclaim_inodes_nr()
1000 xfs_reclaim_work_queue(mp); in xfs_reclaim_inodes_nr()
1001 xfs_ail_push_all(mp->m_ail); in xfs_reclaim_inodes_nr()
1003 xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw); in xfs_reclaim_inodes_nr()
1013 struct xfs_mount *mp) in xfs_reclaim_inodes_count() argument
1019 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) { in xfs_reclaim_inodes_count()
1111 struct xfs_mount *mp = container_of(to_delayed_work(work), in xfs_reclaim_worker() local
1114 xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL); in xfs_reclaim_worker()
1115 xfs_reclaim_work_queue(mp); in xfs_reclaim_worker()
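
xfs_reclaim_worker recovers its xfs_mount from the embedded delayed_work via container_of, does one reclaim pass, and re-arms itself through xfs_reclaim_work_queue only while tagged work remains (see the guard at line 194). A self-contained userspace sketch of the container_of recovery — fake_mount is an invented stand-in:

#include <stddef.h>
#include <stdio.h>

/* Userspace model of container_of(): recover the outer struct from a
 * pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct delayed_work { int pad; };

struct fake_mount {
	const char *name;
	struct delayed_work m_reclaim_work;
};

static void reclaim_worker(struct delayed_work *work)
{
	struct fake_mount *mp =
		container_of(work, struct fake_mount, m_reclaim_work);

	/* ... one reclaim pass, then re-arm only if work remains ... */
	printf("reclaim pass on %s\n", mp->name);
}

int main(void)
{
	struct fake_mount m = { .name = "xfs0" };

	reclaim_worker(&m.m_reclaim_work);
	return 0;
}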
1166 struct xfs_mount *mp = ip->i_mount; in xfs_blockgc_set_iflag() local
1181 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); in xfs_blockgc_set_iflag()
1184 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), in xfs_blockgc_set_iflag()
1204 struct xfs_mount *mp = ip->i_mount; in xfs_blockgc_clear_iflag() local
1218 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); in xfs_blockgc_clear_iflag()
1221 xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), in xfs_blockgc_clear_iflag()
1347 struct xfs_mount *mp) in xfs_blockgc_stop() argument
1352 if (!xfs_clear_blockgc_enabled(mp)) in xfs_blockgc_stop()
1355 for_each_perag(mp, agno, pag) in xfs_blockgc_stop()
1357 trace_xfs_blockgc_stop(mp, __return_address); in xfs_blockgc_stop()
1363 struct xfs_mount *mp) in xfs_blockgc_start() argument
1368 if (xfs_set_blockgc_enabled(mp)) in xfs_blockgc_start()
1371 trace_xfs_blockgc_start(mp, __return_address); in xfs_blockgc_start()
1372 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG) in xfs_blockgc_start()
1448 struct xfs_mount *mp = pag->pag_mount; in xfs_blockgc_worker() local
1451 trace_xfs_blockgc_worker(mp, __return_address); in xfs_blockgc_worker()
1455 xfs_info(mp, "AG %u preallocation gc worker failed, err=%d", in xfs_blockgc_worker()
1466 struct xfs_mount *mp, in xfs_blockgc_free_space() argument
1471 trace_xfs_blockgc_free_space(mp, icw, _RET_IP_); in xfs_blockgc_free_space()
1473 error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw); in xfs_blockgc_free_space()
1477 return xfs_inodegc_flush(mp); in xfs_blockgc_free_space()
1486 struct xfs_mount *mp) in xfs_blockgc_flush_all() argument
1491 trace_xfs_blockgc_flush_all(mp, __return_address); in xfs_blockgc_flush_all()
1498 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG) in xfs_blockgc_flush_all()
1502 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG) in xfs_blockgc_flush_all()
1505 return xfs_inodegc_flush(mp); in xfs_blockgc_flush_all()
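
xfs_blockgc_flush_all makes two passes over the tagged AGs: first expedite every worker (delay 0), then wait on each, so the waits overlap rather than serialize. A sketch of that two-pass shape with invented names:

#include <stdio.h>

#define NR_AGS 3

static int worker_delay_ms[NR_AGS] = { 100, 100, 100 };

static void flush_ag(int agno) { (void)agno; /* wait on that AG's worker */ }

int main(void)
{
	/* pass 1: expedite every tagged AG's worker before waiting on any */
	for (int agno = 0; agno < NR_AGS; agno++)
		worker_delay_ms[agno] = 0;	/* mod_delayed_work(..., 0) */

	/* pass 2: now wait for each one; the work runs concurrently */
	for (int agno = 0; agno < NR_AGS; agno++)
		flush_ag(agno);

	printf("all blockgc workers flushed\n");
	return 0;
}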
1520 struct xfs_mount *mp, in xfs_blockgc_free_dquots() argument
1538 if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) { in xfs_blockgc_free_dquots()
1539 icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id); in xfs_blockgc_free_dquots()
1544 if (XFS_IS_UQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) { in xfs_blockgc_free_dquots()
1545 icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id); in xfs_blockgc_free_dquots()
1550 if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) { in xfs_blockgc_free_dquots()
1559 return xfs_blockgc_free_space(mp, &icw); in xfs_blockgc_free_dquots()
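
xfs_blockgc_free_dquots builds an icwalk filter incrementally: each enforced quota whose dquot is below the low-space threshold contributes an id and a flag, and the scan runs only if something matched. A userspace sketch of that accumulate-then-decide pattern (flag values and ids are invented):

#include <stdbool.h>
#include <stdio.h>

#define ICW_UID  (1 << 0)
#define ICW_GID  (1 << 1)
#define ICW_PRJ  (1 << 2)

struct icwalk { unsigned flags; unsigned uid, gid, prjid; };

int main(void)
{
	bool uid_low = true, gid_low = false, prj_low = true;
	struct icwalk icw = { 0 };
	bool do_work = false;

	/* accumulate one filter per low-space quota (the do_work pattern) */
	if (uid_low) { icw.uid = 1000; icw.flags |= ICW_UID; do_work = true; }
	if (gid_low) { icw.gid = 100;  icw.flags |= ICW_GID; do_work = true; }
	if (prj_low) { icw.prjid = 7;  icw.flags |= ICW_PRJ; do_work = true; }

	if (do_work)	/* only scan if at least one quota matched */
		printf("scan with flags %#x\n", icw.flags);
	return 0;
}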
1639 struct xfs_mount *mp = pag->pag_mount; in xfs_icwalk_ag() local
1692 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno) in xfs_icwalk_ag()
1694 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); in xfs_icwalk_ag()
1695 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) in xfs_icwalk_ag()
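
Lines 1694–1695 are a classic unsigned-wraparound test: the 32-bit cursor is advanced past the current inode, and if the increment made it smaller, the walk ran off the end of the AG. A tiny demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* the walk cursor is a 32-bit AG-relative inode number */
	uint32_t agino = UINT32_MAX;		/* last possible agino */
	uint32_t first_index = agino + 1;	/* wraps to 0, well-defined */

	/* xfs_icwalk_ag()-style check: cursor went backwards, so the
	 * walk of this AG is complete */
	if (first_index < agino)
		printf("wrapped: walk of this AG is done\n");
	return 0;
}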
1744 struct xfs_mount *mp, in xfs_icwalk() argument
1753 for_each_perag_tag(mp, agno, pag, goal) { in xfs_icwalk()
1798 struct xfs_mount *mp = ip->i_mount; in xfs_inodegc_set_reclaimable() local
1801 if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) { in xfs_inodegc_set_reclaimable()
1807 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); in xfs_inodegc_set_reclaimable()
1814 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), in xfs_inodegc_set_reclaimable()
1848 struct xfs_mount *mp = gc->mp; in xfs_inodegc_worker() local
1857 cpumask_clear_cpu(gc->cpu, &mp->m_inodegc_cpumask); in xfs_inodegc_worker()
1873 trace_xfs_inodegc_worker(mp, READ_ONCE(gc->shrinker_hits)); in xfs_inodegc_worker()
1894 struct xfs_mount *mp) in xfs_inodegc_push() argument
1896 if (!xfs_is_inodegc_enabled(mp)) in xfs_inodegc_push()
1898 trace_xfs_inodegc_push(mp, __return_address); in xfs_inodegc_push()
1899 xfs_inodegc_queue_all(mp); in xfs_inodegc_push()
1908 struct xfs_mount *mp) in xfs_inodegc_flush() argument
1910 xfs_inodegc_push(mp); in xfs_inodegc_flush()
1911 trace_xfs_inodegc_flush(mp, __return_address); in xfs_inodegc_flush()
1912 return xfs_inodegc_wait_all(mp); in xfs_inodegc_flush()
1922 struct xfs_mount *mp) in xfs_inodegc_stop() argument
1926 if (!xfs_clear_inodegc_enabled(mp)) in xfs_inodegc_stop()
1939 xfs_inodegc_queue_all(mp); in xfs_inodegc_stop()
1941 flush_workqueue(mp->m_inodegc_wq); in xfs_inodegc_stop()
1942 rerun = xfs_inodegc_queue_all(mp); in xfs_inodegc_stop()
1945 trace_xfs_inodegc_stop(mp, __return_address); in xfs_inodegc_stop()
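
xfs_inodegc_stop clears the enabled flag first so nothing new can be queued, then arms and flushes the per-cpu workers in a loop until nothing re-arms behind it, closing the race with concurrent queueing. A userspace model of that drain loop (all names invented):

#include <stdbool.h>
#include <stdio.h>

static bool gc_enabled = true;
static int pending = 2;		/* inodes already queued for gc */

/* A queue attempt racing with stop: refused once the flag is clear. */
static void inodegc_queue(void)
{
	if (gc_enabled)
		pending++;
}

static bool queue_all(void)	/* arm workers; report if any were armed */
{
	return pending > 0;
}

static void flush_all(void)	/* run the armed workers to completion */
{
	pending = 0;
}

int main(void)
{
	bool rerun;

	gc_enabled = false;	/* step 1: no new work may be queued */
	inodegc_queue();	/* the race loses: pending stays at 2 */

	queue_all();		/* step 2: arm whatever is queued */
	do {			/* step 3: flush until nothing re-arms */
		flush_all();
		rerun = queue_all();
	} while (rerun);
	printf("stopped with pending=%d\n", pending);
	return 0;
}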
1955 struct xfs_mount *mp) in xfs_inodegc_start() argument
1957 if (xfs_set_inodegc_enabled(mp)) in xfs_inodegc_start()
1960 trace_xfs_inodegc_start(mp, __return_address); in xfs_inodegc_start()
1961 xfs_inodegc_queue_all(mp); in xfs_inodegc_start()
1969 struct xfs_mount *mp = ip->i_mount; in xfs_inodegc_want_queue_rt_file() local
1974 if (__percpu_counter_compare(&mp->m_frextents, in xfs_inodegc_want_queue_rt_file()
1975 mp->m_low_rtexts[XFS_LOWSP_5_PCNT], in xfs_inodegc_want_queue_rt_file()
1997 struct xfs_mount *mp = ip->i_mount; in xfs_inodegc_want_queue_work() local
1999 if (items > mp->m_ino_geo.inodes_per_cluster) in xfs_inodegc_want_queue_work()
2002 if (__percpu_counter_compare(&mp->m_fdblocks, in xfs_inodegc_want_queue_work()
2003 mp->m_low_space[XFS_LOWSP_5_PCNT], in xfs_inodegc_want_queue_work()
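
xfs_inodegc_want_queue_work pushes the batch early when it exceeds a cluster's worth of inodes or when free blocks fall under the 5% low-space mark (a percpu-counter compare). A sketch with illustrative threshold values — the real numbers come from the mount geometry:

#include <stdbool.h>
#include <stdio.h>

#define INODES_PER_CLUSTER  32		/* illustrative geometry  */
#define LOW_SPACE_5_PCNT    5000	/* illustrative threshold */

/* Decide whether the per-cpu inodegc batch should be pushed now:
 * either the batch is big, or the fs is getting tight on blocks. */
static bool want_queue_work(unsigned items, long long fdblocks)
{
	if (items > INODES_PER_CLUSTER)
		return true;
	if (fdblocks < LOW_SPACE_5_PCNT)  /* __percpu_counter_compare */
		return true;
	return false;
}

int main(void)
{
	printf("%d %d %d\n",
	       want_queue_work(40, 100000),	/* big batch -> 1 */
	       want_queue_work(4, 100),		/* low space -> 1 */
	       want_queue_work(4, 100000));	/* neither   -> 0 */
	return 0;
}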
2064 struct xfs_mount *mp = ip->i_mount; in xfs_inodegc_queue() local
2077 gc = this_cpu_ptr(mp->m_inodegc); in xfs_inodegc_queue()
2089 if (!cpumask_test_cpu(cpu_nr, &mp->m_inodegc_cpumask)) in xfs_inodegc_queue()
2090 cpumask_test_and_set_cpu(cpu_nr, &mp->m_inodegc_cpumask); in xfs_inodegc_queue()
2096 if (!xfs_is_inodegc_enabled(mp)) { in xfs_inodegc_queue()
2104 trace_xfs_inodegc_queue(mp, __return_address); in xfs_inodegc_queue()
2105 mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work, in xfs_inodegc_queue()
2110 trace_xfs_inodegc_throttle(mp, __return_address); in xfs_inodegc_queue()
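
xfs_inodegc_queue stages the inode on the current CPU's list, marks that CPU in m_inodegc_cpumask, and arms the delayed work on the same CPU, sooner under pressure. A loose userspace model (delays and names invented):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct gc { int items; };

static struct gc per_cpu_gc[NR_CPUS];
static unsigned cpu_mask;	/* models mp->m_inodegc_cpumask */

/* Queue one inode on the current CPU's list, mark that CPU as having
 * work, and decide how soon to arm the worker. */
static void inodegc_queue(int cpu, bool low_space)
{
	int delay_ms;

	per_cpu_gc[cpu].items++;
	cpu_mask |= 1u << cpu;			/* cpumask_test_and_set_cpu */

	delay_ms = low_space ? 0 : 100;		/* push sooner under pressure */
	printf("cpu%d: %d item(s), mask=%#x, worker in %dms\n",
	       cpu, per_cpu_gc[cpu].items, cpu_mask, delay_ms);
}

int main(void)
{
	inodegc_queue(1, false);
	inodegc_queue(1, true);
	return 0;
}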
2129 struct xfs_mount *mp = ip->i_mount; in xfs_inode_mark_reclaimable() local
2132 XFS_STATS_INC(mp, vn_reclaim); in xfs_inode_mark_reclaimable()
2168 struct xfs_mount *mp = container_of(shrink, struct xfs_mount, in xfs_inodegc_shrinker_count() local
2173 if (!xfs_is_inodegc_enabled(mp)) in xfs_inodegc_shrinker_count()
2176 for_each_cpu(cpu, &mp->m_inodegc_cpumask) { in xfs_inodegc_shrinker_count()
2177 gc = per_cpu_ptr(mp->m_inodegc, cpu); in xfs_inodegc_shrinker_count()
2190 struct xfs_mount *mp = container_of(shrink, struct xfs_mount, in xfs_inodegc_shrinker_scan() local
2196 if (!xfs_is_inodegc_enabled(mp)) in xfs_inodegc_shrinker_scan()
2199 trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address); in xfs_inodegc_shrinker_scan()
2201 for_each_cpu(cpu, &mp->m_inodegc_cpumask) { in xfs_inodegc_shrinker_scan()
2202 gc = per_cpu_ptr(mp->m_inodegc, cpu); in xfs_inodegc_shrinker_scan()
2207 mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0); in xfs_inodegc_shrinker_scan()
2225 struct xfs_mount *mp) in xfs_inodegc_register_shrinker() argument
2227 struct shrinker *shrink = &mp->m_inodegc_shrinker; in xfs_inodegc_register_shrinker()
2235 return register_shrinker(shrink, "xfs-inodegc:%s", mp->m_super->s_id); in xfs_inodegc_register_shrinker()
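
The shrinker hits show the usual count/scan pair: count reports whether deferred inodegc work exists so memory reclaim knows kicking it could free memory, and scan arms every queued per-cpu worker immediately. A loose userspace analog of that callback pair — fake_shrinker and its fields are invented, not the kernel shrinker API:

#include <stdio.h>

/* Userspace model of the shrinker pair: count() reports how much work
 * reclaim could trigger, scan() actually kicks it. */
struct fake_shrinker {
	unsigned long (*count)(void);
	unsigned long (*scan)(void);
};

static unsigned long queued_inodes = 64;

static unsigned long gc_count(void) { return queued_inodes; }

static unsigned long gc_scan(void)
{
	unsigned long n = queued_inodes;

	queued_inodes = 0;	/* models arming every per-cpu worker now */
	return n;
}

int main(void)
{
	struct fake_shrinker s = { .count = gc_count, .scan = gc_scan };

	if (s.count())		/* memory-pressure path */
		printf("kicked %lu queued inode(s)\n", s.scan());
	return 0;
}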