
Searched refs:i_gl (Results 1 – 21 of 21) sorted by relevance

/linux-6.1.9/fs/gfs2/
super.c
132 struct gfs2_glock *j_gl = ip->i_gl; in gfs2_make_fs_rw()
186 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE, in gfs2_statfs_init()
225 gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh); in gfs2_statfs_change()
252 gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh); in update_statfs()
253 gfs2_trans_add_meta(m_ip->i_gl, m_bh); in update_statfs()
276 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE, in gfs2_statfs_sync()
340 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh); in gfs2_lock_fs_check_clean()
429 struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl); in gfs2_write_inode()
435 gfs2_log_flush(GFS2_SB(inode), ip->i_gl, in gfs2_write_inode()
480 if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { in gfs2_dirty_inode()
[more matches in this file not shown]
inode.c
136 &ip->i_gl); in gfs2_inode_lookup()
162 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, in gfs2_inode_lookup()
169 gfs2_inode_already_deleted(ip->i_gl, no_formal_ino)) in gfs2_inode_lookup()
180 set_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags); in gfs2_inode_lookup()
186 glock_set_object(ip->i_gl, ip); in gfs2_inode_lookup()
192 glock_clear_object(ip->i_gl, ip); in gfs2_inode_lookup()
311 if (gfs2_glock_is_locked_by_me(dip->i_gl) == NULL) { in gfs2_lookupi()
312 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh); in gfs2_lookupi()
451 bh = gfs2_meta_new(ip->i_gl, ip->i_eattr); in gfs2_init_xattr()
452 gfs2_trans_add_meta(ip->i_gl, bh); in gfs2_init_xattr()
[more matches in this file not shown]
xattr.c
131 error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0, &bh); in ea_foreach()
155 error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, 0, &eabh); in ea_foreach()
278 gfs2_trans_add_meta(ip->i_gl, bh); in ea_dealloc_unstuffed()
425 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); in gfs2_listxattr()
471 error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0, 0, in gfs2_iter_unstuffed()
504 gfs2_trans_add_meta(ip->i_gl, bh[x]); in gfs2_iter_unstuffed()
613 if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { in gfs2_xattr_get()
614 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); in gfs2_xattr_get()
646 *bhp = gfs2_meta_new(ip->i_gl, block); in ea_alloc_blk()
647 gfs2_trans_add_meta(ip->i_gl, *bhp); in ea_alloc_blk()
[more matches in this file not shown]
util.c
60 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_NOEXP | in check_journal_clean()
125 struct gfs2_glock *i_gl; in signal_our_withdraw() local
137 i_gl = ip->i_gl; in signal_our_withdraw()
196 wait_on_bit(&i_gl->gl_flags, GLF_DEMOTE, in signal_our_withdraw()
218 if (i_gl->gl_ops->go_free) { in signal_our_withdraw()
219 set_bit(GLF_FREEING, &i_gl->gl_flags); in signal_our_withdraw()
220 wait_on_bit(&i_gl->gl_flags, GLF_FREEING, TASK_UNINTERRUPTIBLE); in signal_our_withdraw()
465 gfs2_dump_glock(NULL, ip->i_gl, 1); in gfs2_consist_inode_i()
acl.c
70 if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { in gfs2_get_acl()
71 int ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, in gfs2_get_acl()
128 if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { in gfs2_set_acl()
129 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); in gfs2_set_acl()
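
Note on the acl.c and xattr.c hits above: callers take the inode's glock only when the current task does not already hold it (the gfs2_glock_is_locked_by_me() check), and drop it again on the way out. Below is a minimal sketch of that pattern; the function name and the elided metadata access are placeholders, while the glock helpers (gfs2_glock_nq_init(), gfs2_glock_dq_uninit(), LM_ST_SHARED, LM_FLAG_ANY) are the ones visible in these results or in the standard fs/gfs2 glock API.

/*
 * Sketch only: the "take i_gl if not already held" pattern from the acl.c
 * and xattr.c hits.  example_read_under_inode_glock() is a hypothetical name.
 */
#include <linux/types.h>
#include "incore.h"	/* struct gfs2_inode (i_gl member) */
#include "glock.h"	/* struct gfs2_holder and the glock helpers */

static int example_read_under_inode_glock(struct gfs2_inode *ip)
{
	struct gfs2_holder gh;
	bool need_unlock = false;
	int error = 0;

	/* Enqueue a new shared holder only if i_gl is not already held
	 * by the current task. */
	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
					   LM_FLAG_ANY, &gh);
		if (error)
			return error;
		need_unlock = true;
	}

	/* ... read inode metadata while the shared glock is held ... */

	if (need_unlock)
		gfs2_glock_dq_uninit(&gh);
	return error;
}
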
file.c
65 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, in gfs2_llseek()
111 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh); in gfs2_readdir()
168 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); in gfs2_fileattr_get()
227 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); in do_gfs2_set_flags()
244 gfs2_log_flush(sdp, ip->i_gl, in do_gfs2_set_flags()
263 gfs2_trans_add_meta(ip->i_gl, bh); in do_gfs2_set_flags()
431 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); in gfs2_page_mkwrite()
456 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); in gfs2_page_mkwrite()
557 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); in gfs2_fault()
597 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, in gfs2_mmap()
[more matches in this file not shown]
dir.c
94 bh = gfs2_meta_new(ip->i_gl, block); in gfs2_dir_get_new_buffer()
95 gfs2_trans_add_meta(ip->i_gl, bh); in gfs2_dir_get_new_buffer()
108 error = gfs2_meta_read(ip->i_gl, block, DIO_WAIT, 0, &bh); in gfs2_dir_get_existing_buffer()
129 gfs2_trans_add_meta(ip->i_gl, dibh); in gfs2_dir_write_stuffed()
210 gfs2_trans_add_meta(ip->i_gl, bh); in gfs2_dir_write_data()
232 gfs2_trans_add_meta(ip->i_gl, dibh); in gfs2_dir_write_data()
301 bh = gfs2_meta_ra(ip->i_gl, dblock, extlen); in gfs2_dir_read_data()
303 error = gfs2_meta_read(ip->i_gl, dblock, DIO_WAIT, 0, &bh); in gfs2_dir_read_data()
678 gfs2_trans_add_meta(dip->i_gl, bh); in dirent_del()
717 gfs2_trans_add_meta(ip->i_gl, bh); in do_init_dirent()
[more matches in this file not shown]
dentry.c
63 had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL); in gfs2_drevalidate()
65 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh); in gfs2_drevalidate()
bmap.c
86 gfs2_trans_add_data(ip->i_gl, bh); in gfs2_unstuffer_page()
132 gfs2_trans_add_meta(ip->i_gl, dibh); in __gfs2_unstuff_inode()
680 gfs2_trans_add_meta(ip->i_gl, dibh); in __gfs2_iomap_alloc()
724 gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++); in __gfs2_iomap_alloc()
750 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]); in __gfs2_iomap_alloc()
752 gfs2_indirect_init(mp, ip->i_gl, i, in __gfs2_iomap_alloc()
762 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]); in __gfs2_iomap_alloc()
1169 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); in gfs2_iomap_end()
1370 gfs2_trans_add_meta(ip->i_gl, dibh); in trunc_start()
1542 gfs2_trans_add_meta(ip->i_gl, bh); in sweep_bh_for_rgrps()
[more matches in this file not shown]
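
Note on the bmap.c and dir.c hits above: every metadata buffer is registered with the running transaction through the inode's glock (gfs2_trans_add_meta(ip->i_gl, bh), or gfs2_trans_add_data() for journaled data) before it is dirtied. A minimal sketch of that journaling pattern follows, assuming the caller already holds i_gl exclusively; the function name is hypothetical, and gfs2_trans_begin()/gfs2_trans_end(), RES_DINODE and gfs2_meta_inode_buffer() are taken from the wider gfs2 transaction API rather than from the hits listed here.

/*
 * Sketch only: dirty the on-disk dinode inside a transaction.
 * example_dirty_dinode() is a hypothetical name.
 */
#include <linux/buffer_head.h>
#include "incore.h"	/* struct gfs2_inode, struct gfs2_sbd, GFS2_SB() */
#include "meta_io.h"	/* gfs2_meta_inode_buffer() */
#include "trans.h"	/* gfs2_trans_begin/add_meta/end, RES_DINODE */

static int example_dirty_dinode(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out_end;

	/* Register the buffer with the transaction via i_gl; only then is it
	 * safe to modify the dinode image in dibh->b_data. */
	gfs2_trans_add_meta(ip->i_gl, dibh);
	/* ... update fields in dibh->b_data ... */

	brelse(dibh);
out_end:
	gfs2_trans_end(sdp);
	return error;
}
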
aops.c
57 gfs2_trans_add_data(ip->i_gl, bh); in gfs2_page_add_databufs()
158 if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) in gfs2_jdata_writepage()
393 gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL | in gfs2_jdata_writepages()
600 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); in gfs2_bmap()
lops.c
793 struct gfs2_glock *gl = ip->i_gl; in buf_lo_scan_elements()
848 gfs2_inode_metasync(ip->i_gl); in buf_lo_after_scan()
854 gfs2_inode_metasync(ip->i_gl); in buf_lo_after_scan()
1017 struct gfs2_glock *gl = ip->i_gl; in databuf_lo_scan_elements()
1068 gfs2_inode_metasync(ip->i_gl); in databuf_lo_after_scan()
1075 gfs2_inode_metasync(ip->i_gl); in databuf_lo_after_scan()
recovery.c
36 struct gfs2_glock *gl = ip->i_gl; in gfs2_replay_read_block()
351 gfs2_inode_metasync(ip->i_gl); in update_statfs_inode()
445 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, in gfs2_recover_func()
quota.c
394 error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits, in bh_get()
672 gfs2_trans_add_meta(ip->i_gl, qd->qd_bh); in do_qc()
751 gfs2_trans_add_data(ip->i_gl, bh); in gfs2_write_buf_to_page()
916 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh); in do_sync()
975 gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl, in do_sync()
1032 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh); in do_glock()
1392 bh = gfs2_meta_ra(ip->i_gl, dblock, extlen); in gfs2_quota_init()
1679 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh); in gfs2_set_dqblk()
meta_io.c
448 bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE); in gfs2_journal_wipe()
483 struct gfs2_glock *gl = ip->i_gl; in gfs2_meta_buffer()
trace_gfs2.h
455 __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
491 __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
523 __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
export.c
112 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &gh); in gfs2_get_name()
ops_fstype.c
582 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh); in gfs2_jindex_hold()
696 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_NOPID, in init_statfs()
793 sdp->sd_jinode_gl = ip->i_gl; in init_journal()
794 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, in init_journal()
967 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_NOPID, in init_per_node()
incore.h
391 struct gfs2_glock *i_gl; member
glops.c
601 struct gfs2_glock *j_gl = ip->i_gl; in freeze_go_xmote_bh()
glock.c
725 if (gl == m_ip->i_gl) in is_system_glock()
994 inode_gl = ip->i_gl; in gfs2_try_evict()
rgrp.c
1037 struct gfs2_glock *gl = ip->i_gl; in gfs2_rindex_update()
2459 gfs2_trans_add_meta(ip->i_gl, dibh); in gfs2_alloc_blocks()
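
Note on the results as a whole: i_gl (declared at incore.h:391 above) is the struct gfs2_inode member pointing at the inode's glock, and most hits either enqueue a holder on it (gfs2_glock_nq_init()) or pass it to the metadata and journaling helpers. A final sketch ties the two together for a simple read path; the function name and block-number argument are placeholders, while gfs2_meta_read(), DIO_WAIT, LM_ST_SHARED and LM_FLAG_ANY appear in the hits above and gfs2_glock_dq_uninit()/brelse() are standard helpers.

/*
 * Sketch only: read one metadata block through the inode glock.
 * example_read_meta_block() is a hypothetical name.
 */
#include <linux/buffer_head.h>
#include "incore.h"	/* struct gfs2_inode and friends */
#include "glock.h"	/* holder helpers, LM_ST_SHARED, LM_FLAG_ANY */
#include "meta_io.h"	/* gfs2_meta_read() */

static int example_read_meta_block(struct gfs2_inode *ip, u64 blkno)
{
	struct gfs2_holder gh;
	struct buffer_head *bh;
	int error;

	/* A shared holder on i_gl keeps the block contents stable
	 * cluster-wide while it is read. */
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
	if (error)
		return error;

	error = gfs2_meta_read(ip->i_gl, blkno, DIO_WAIT, 0, &bh);
	if (!error) {
		/* ... inspect bh->b_data ... */
		brelse(bh);
	}

	gfs2_glock_dq_uninit(&gh);
	return error;
}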