fs/gfs2/export.c  +3 −1

@@ -134,7 +134,9 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb,
         struct gfs2_sbd *sdp = sb->s_fs_info;
         struct inode *inode;
 
-        inode = gfs2_lookup_by_inum(sdp, inum->no_addr, &inum->no_formal_ino,
+        if (!inum->no_formal_ino)
+                return ERR_PTR(-ESTALE);
+        inode = gfs2_lookup_by_inum(sdp, inum->no_addr, inum->no_formal_ino,
                                     GFS2_BLKST_DINODE);
         if (IS_ERR(inode))
                 return ERR_CAST(inode);

fs/gfs2/glock.c  +165 −11

@@ -125,13 +125,12 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu)
 {
         struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
 
-        if (gl->gl_ops->go_flags & GLOF_ASPACE) {
-                kmem_cache_free(gfs2_glock_aspace_cachep, gl);
-        } else {
-                kfree(gl->gl_lksb.sb_lvbptr);
+        kfree(gl->gl_lksb.sb_lvbptr);
+        if (gl->gl_ops->go_flags & GLOF_ASPACE)
+                kmem_cache_free(gfs2_glock_aspace_cachep, gl);
+        else
                 kmem_cache_free(gfs2_glock_cachep, gl);
-        }
 }
 
 /**
  * glock_blocked_by_withdraw - determine if we can still use a glock

@@ -465,6 +464,15 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
         gl->gl_tchange = jiffies;
 }
 
+static void gfs2_set_demote(struct gfs2_glock *gl)
+{
+        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+        set_bit(GLF_DEMOTE, &gl->gl_flags);
+        smp_mb();
+        wake_up(&sdp->sd_async_glock_wait);
+}
+
 static void gfs2_demote_wake(struct gfs2_glock *gl)
 {
         gl->gl_demote_state = LM_ST_EXCLUSIVE;

@@ -757,20 +765,127 @@ __acquires(&gl->gl_lockref.lock)
         return;
 }
 
+void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation)
+{
+        struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;
+
+        if (ri->ri_magic == 0)
+                ri->ri_magic = cpu_to_be32(GFS2_MAGIC);
+        if (ri->ri_magic == cpu_to_be32(GFS2_MAGIC))
+                ri->ri_generation_deleted = cpu_to_be64(generation);
+}
+
+bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation)
+{
+        struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;
+
+        if (ri->ri_magic != cpu_to_be32(GFS2_MAGIC))
+                return false;
+        return generation <= be64_to_cpu(ri->ri_generation_deleted);
+}
+
+static void gfs2_glock_poke(struct gfs2_glock *gl)
+{
+        int flags = LM_FLAG_TRY_1CB | LM_FLAG_ANY | GL_SKIP;
+        struct gfs2_holder gh;
+        int error;
+
+        error = gfs2_glock_nq_init(gl, LM_ST_SHARED, flags, &gh);
+        if (!error)
+                gfs2_glock_dq(&gh);
+}
+
+static bool gfs2_try_evict(struct gfs2_glock *gl)
+{
+        struct gfs2_inode *ip;
+        bool evicted = false;
+
+        /*
+         * If there is contention on the iopen glock and we have an inode, try
+         * to grab and release the inode so that it can be evicted.  This will
+         * allow the remote node to go ahead and delete the inode without us
+         * having to do it, which will avoid rgrp glock thrashing.
+         *
+         * The remote node is likely still holding the corresponding inode
+         * glock, so it will run before we get to verify that the delete has
+         * happened below.
+         */
+        spin_lock(&gl->gl_lockref.lock);
+        ip = gl->gl_object;
+        if (ip && !igrab(&ip->i_inode))
+                ip = NULL;
+        spin_unlock(&gl->gl_lockref.lock);
+        if (ip) {
+                struct gfs2_glock *inode_gl = NULL;
+
+                gl->gl_no_formal_ino = ip->i_no_formal_ino;
+                set_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
+                d_prune_aliases(&ip->i_inode);
+                iput(&ip->i_inode);
+
+                /* If the inode was evicted, gl->gl_object will now be NULL. */
+                spin_lock(&gl->gl_lockref.lock);
+                ip = gl->gl_object;
+                if (ip) {
+                        inode_gl = ip->i_gl;
+                        lockref_get(&inode_gl->gl_lockref);
+                        clear_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
+                }
+                spin_unlock(&gl->gl_lockref.lock);
+                if (inode_gl) {
+                        gfs2_glock_poke(inode_gl);
+                        gfs2_glock_put(inode_gl);
+                }
+                evicted = !ip;
+        }
+        return evicted;
+}
+
 static void delete_work_func(struct work_struct *work)
 {
-        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
+        struct delayed_work *dwork = to_delayed_work(work);
+        struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock,
+                                             gl_delete);
         struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
         struct inode *inode;
         u64 no_addr = gl->gl_name.ln_number;
 
+        spin_lock(&gl->gl_lockref.lock);
+        clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+        spin_unlock(&gl->gl_lockref.lock);
+
         /* If someone's using this glock to create a new dinode, the block must
            have been freed by another node, then re-used, in which case our
            iopen callback is too late after the fact. Ignore it. */
         if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
                 goto out;
 
-        inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
+        if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
+                /*
+                 * If we can evict the inode, give the remote node trying to
+                 * delete the inode some time before verifying that the delete
+                 * has happened.  Otherwise, if we cause contention on the
+                 * inode glock immediately, the remote node will think that we
+                 * still have the inode in use, and so it will give up waiting.
+                 *
+                 * If we can't evict the inode, signal to the remote node that
+                 * the inode is still in use.  We'll later try to delete the
+                 * inode locally in gfs2_evict_inode.
+                 *
+                 * FIXME: We only need to verify that the remote node has
+                 * deleted the inode because nodes before this remote delete
+                 * rework won't cooperate.  At a later time, when we no longer
+                 * care about compatibility with such nodes, we can skip this
+                 * step entirely.
+                 */
+                if (gfs2_try_evict(gl)) {
+                        if (gfs2_queue_delete_work(gl, 5 * HZ))
+                                return;
+                }
+                goto out;
+        }
+
+        inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino,
+                                    GFS2_BLKST_UNLINKED);
         if (!IS_ERR_OR_NULL(inode)) {
                 d_prune_aliases(inode);
                 iput(inode);
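Note: the ri_magic / ri_generation_deleted pair introduced above lives in the lock value block (LVB) that DLM keeps attached to the inode glock, which is why gfs2_inode_glops gains GLOF_LVB further down in glops.c. Once one node records that a given generation of an inode has been deleted, every node that later acquires the glock can tell that its own delete attempt would be redundant. Below is a minimal runnable userspace sketch of that check, assuming an LVB layout mirroring struct gfs2_inode_lvb and using htobe32/be64toh as stand-ins for the kernel's cpu_to_be32/be64_to_cpu; the struct and helper names here are illustrative, not the kernel API.

/* Standalone model of the LVB "generation deleted" protocol above.
 * struct inode_lvb mirrors struct gfs2_inode_lvb; the on-wire fields
 * are big-endian, as in the kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <endian.h>

#define GFS2_MAGIC 0x01161970

struct inode_lvb {
        uint32_t ri_magic;              /* big-endian GFS2_MAGIC once valid */
        uint32_t __pad;
        uint64_t ri_generation_deleted; /* big-endian */
};

static void remember_delete(struct inode_lvb *ri, uint64_t generation)
{
        /* An all-zero LVB is "uninitialized"; stamp the magic first. */
        if (ri->ri_magic == 0)
                ri->ri_magic = htobe32(GFS2_MAGIC);
        /* Only record the generation if the LVB carries our format. */
        if (ri->ri_magic == htobe32(GFS2_MAGIC))
                ri->ri_generation_deleted = htobe64(generation);
}

static bool already_deleted(const struct inode_lvb *ri, uint64_t generation)
{
        if (ri->ri_magic != htobe32(GFS2_MAGIC))
                return false;
        return generation <= be64toh(ri->ri_generation_deleted);
}

int main(void)
{
        struct inode_lvb lvb;

        memset(&lvb, 0, sizeof(lvb));              /* fresh LVB */
        printf("%d\n", already_deleted(&lvb, 5));  /* 0: nothing recorded */
        remember_delete(&lvb, 5);
        printf("%d\n", already_deleted(&lvb, 5));  /* 1: gen 5 already gone */
        printf("%d\n", already_deleted(&lvb, 6));  /* 0: block reused, newer gen */
        return 0;
}

The <= comparison is what makes block reuse safe: a newer generation of the same inode number compares greater than the recorded value, so it is never mistaken for the inode that was deleted.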
@@ -801,7 +916,7 @@ static void glock_work_func(struct work_struct *work)
                 if (!delay) {
                         clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
-                        set_bit(GLF_DEMOTE, &gl->gl_flags);
+                        gfs2_set_demote(gl);
                 }
         }
         run_queue(gl, 0);

@@ -932,7 +1047,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
         gl->gl_object = NULL;
         gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
         INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
-        INIT_WORK(&gl->gl_delete, delete_work_func);
+        INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);
 
         mapping = gfs2_glock2aspace(gl);
         if (mapping) {

@@ -1146,9 +1261,10 @@ int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
 static void handle_callback(struct gfs2_glock *gl, unsigned int state,
                             unsigned long delay, bool remote)
 {
-        int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
-
-        set_bit(bit, &gl->gl_flags);
+        if (delay)
+                set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
+        else
+                gfs2_set_demote(gl);
         if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
                 gl->gl_demote_state = state;
                 gl->gl_demote_time = jiffies;
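Note: both conversions above route demote requests through gfs2_set_demote(), whose smp_mb() plus wake_up() pairing lets a task sleeping in gfs2_glock_async_wait() on sd_async_glock_wait reliably observe the GLF_DEMOTE bit. A rough userspace model of the same publish-then-wake pattern follows, with pthreads standing in for the kernel waitqueue; the names and structure are illustrative, not the kernel API.

/* Userspace model of the gfs2_set_demote() pattern: publish a flag,
 * then wake anyone sleeping on the async wait.  The mutex provides the
 * ordering that smp_mb() supplies in the kernel version, so a waiter
 * cannot check the flag, miss the wakeup, and sleep forever. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_bool demote_pending;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t async_wait = PTHREAD_COND_INITIALIZER;

static void set_demote(void)
{
        atomic_store(&demote_pending, 1);       /* set_bit(GLF_DEMOTE, ...) */
        pthread_mutex_lock(&lock);
        pthread_cond_broadcast(&async_wait);    /* wake_up(&sd_async_glock_wait) */
        pthread_mutex_unlock(&lock);
}

static void *waiter(void *arg)
{
        pthread_mutex_lock(&lock);
        while (!atomic_load(&demote_pending))   /* wait_event() condition */
                pthread_cond_wait(&async_wait, &lock);
        pthread_mutex_unlock(&lock);
        printf("demote observed\n");
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, waiter, NULL);
        set_demote();
        pthread_join(t, NULL);
        return 0;
}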
@@ -1755,6 +1871,44 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
         rhashtable_walk_exit(&iter);
 }
 
+bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay)
+{
+        bool queued;
+
+        spin_lock(&gl->gl_lockref.lock);
+        queued = queue_delayed_work(gfs2_delete_workqueue,
+                                    &gl->gl_delete, delay);
+        if (queued)
+                set_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+        spin_unlock(&gl->gl_lockref.lock);
+        return queued;
+}
+
+void gfs2_cancel_delete_work(struct gfs2_glock *gl)
+{
+        if (cancel_delayed_work_sync(&gl->gl_delete)) {
+                clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+                gfs2_glock_put(gl);
+        }
+}
+
+bool gfs2_delete_work_queued(const struct gfs2_glock *gl)
+{
+        return test_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+}
+
+static void flush_delete_work(struct gfs2_glock *gl)
+{
+        flush_delayed_work(&gl->gl_delete);
+        gfs2_glock_queue_work(gl, 0);
+}
+
+void gfs2_flush_delete_work(struct gfs2_sbd *sdp)
+{
+        glock_hash_walk(flush_delete_work, sdp);
+        flush_workqueue(gfs2_delete_workqueue);
+}
+
 /**
  * thaw_glock - thaw out a glock which has an unprocessed reply waiting
  * @gl: The glock to thaw

fs/gfs2/glock.h  +7 −0

@@ -244,6 +244,10 @@ static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
 
 extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
 extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
+extern bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay);
+extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
+extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl);
+extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
 extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
 extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
 extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);

@@ -315,4 +319,7 @@ static inline void glock_clear_object(struct gfs2_glock *gl, void *object)
         spin_unlock(&gl->gl_lockref.lock);
 }
 
+extern void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
+extern bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);
+
 #endif /* __GLOCK_DOT_H__ */

fs/gfs2/glops.c  +9 −2

@@ -612,11 +612,17 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
         if (gl->gl_demote_state == LM_ST_UNLOCKED &&
             gl->gl_state == LM_ST_SHARED && ip) {
                 gl->gl_lockref.count++;
-                if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+                if (!queue_delayed_work(gfs2_delete_workqueue, &gl->gl_delete, 0))
                         gl->gl_lockref.count--;
         }
 }
 
+static int iopen_go_demote_ok(const struct gfs2_glock *gl)
+{
+        return !gfs2_delete_work_queued(gl);
+}
+
 /**
  * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
  * @gl: glock being freed

@@ -696,7 +702,7 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
         .go_lock = inode_go_lock,
         .go_dump = inode_go_dump,
         .go_type = LM_TYPE_INODE,
-        .go_flags = GLOF_ASPACE | GLOF_LRU,
+        .go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
         .go_free = inode_go_free,
 };

@@ -720,6 +726,7 @@ const struct gfs2_glock_operations gfs2_freeze_glops = {
 const struct gfs2_glock_operations gfs2_iopen_glops = {
         .go_type = LM_TYPE_IOPEN,
         .go_callback = iopen_go_callback,
+        .go_demote_ok = iopen_go_demote_ok,
         .go_flags = GLOF_LRU | GLOF_NONDISK,
 };
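Note: the helpers above, together with iopen_go_demote_ok(), enforce a simple discipline: GLF_PENDING_DELETE is set only when the work was newly queued, the flag keeps the iopen glock from being demoted while delete work is outstanding, and a cancel that returns true still owns the reference the queuer took, hence the gfs2_glock_put(). A single-threaded toy model of that reference discipline follows; the types and functions are stand-ins, not the kernel workqueue API.

/* Minimal model of the queue/cancel pairing: the pending flag tracks
 * whether delete work is on the queue, and the reference taken by the
 * queuer is dropped either by the work itself or by a successful
 * cancel. */
#include <stdbool.h>
#include <stdio.h>

struct glock {
        int refcount;
        bool pending_delete;   /* GLF_PENDING_DELETE */
        bool work_queued;      /* stands in for the workqueue state */
};

static bool queue_delete_work(struct glock *gl)
{
        bool queued = !gl->work_queued;   /* queue_delayed_work() returns
                                             false if already pending */
        if (queued) {
                gl->work_queued = true;
                gl->pending_delete = true;
        }
        return queued;
}

static void cancel_delete_work(struct glock *gl)
{
        if (gl->work_queued) {            /* cancel_delayed_work_sync() true */
                gl->work_queued = false;
                gl->pending_delete = false;
                gl->refcount--;           /* drop the queuer's reference */
        }
}

static bool demote_ok(const struct glock *gl)
{
        /* iopen_go_demote_ok(): keep the glock cached while queued */
        return !gl->pending_delete;
}

int main(void)
{
        struct glock gl = { .refcount = 1 };

        gl.refcount++;                    /* iopen_go_callback takes a ref */
        if (!queue_delete_work(&gl))
                gl.refcount--;            /* already queued: ref not needed */
        printf("demote_ok: %d\n", demote_ok(&gl));                /* 0 */
        cancel_delete_work(&gl);
        printf("demote_ok: %d, refs: %d\n", demote_ok(&gl), gl.refcount); /* 1, 1 */
        return 0;
}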
fs/gfs2/incore.h  +7 −2

@@ -345,6 +345,7 @@ enum {
         GLF_OBJECT = 14, /* Used only for tracing */
         GLF_BLOCKING = 15,
         GLF_INODE_CREATING = 16, /* Inode creation occurring */
+        GLF_PENDING_DELETE = 17,
         GLF_FREEING = 18, /* Wait for glock to be freed */
 };

@@ -378,8 +379,11 @@ struct gfs2_glock {
         atomic_t gl_revokes;
         struct delayed_work gl_work;
         union {
-                /* For inode and iopen glocks only */
-                struct work_struct gl_delete;
+                /* For iopen glocks only */
+                struct {
+                        struct delayed_work gl_delete;
+                        u64 gl_no_formal_ino;
+                };
                 /* For rgrp glocks only */
                 struct {
                         loff_t start;

@@ -398,6 +402,7 @@ enum {
         GIF_ORDERED = 4,
         GIF_FREE_VFS_INODE = 5,
         GIF_GLOP_PENDING = 6,
+        GIF_DEFERRED_DELETE = 7,
 };
 
 struct gfs2_inode {
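Note: the union rework relies on anonymous structs (a C11 feature the kernel uses widely): gl_delete and the new gl_no_formal_ino are addressed as if they were direct members of struct gfs2_glock while still sharing storage with the rgrp-only fields. A small standalone illustration, with stub types in place of the kernel ones:

/* Model of the reworked union: iopen glocks use the delayed work plus
 * the saved no_formal_ino; rgrp glocks reuse the same storage for
 * their write range.  Field types are simplified stand-ins. */
#include <stdint.h>
#include <stdio.h>

struct delayed_work_stub { uint64_t opaque[8]; };  /* placeholder size */

struct glock {
        union {
                /* For iopen glocks only */
                struct {
                        struct delayed_work_stub gl_delete;
                        uint64_t gl_no_formal_ino;
                };
                /* For rgrp glocks only */
                struct {
                        int64_t start;
                        int64_t end;
                } gl_vm;
        };
};

int main(void)
{
        struct glock gl;

        /* Anonymous struct members are addressed directly ... */
        gl.gl_no_formal_ino = 42;
        printf("no_formal_ino: %llu\n",
               (unsigned long long)gl.gl_no_formal_ino);
        /* ... and the union means iopen and rgrp state share storage. */
        printf("union size: %zu\n", sizeof(gl));
        return 0;
}

Saving gl_no_formal_ino in the glock matters because gfs2_try_evict() records it before the inode is evicted; delete_work_func() can then pass it to gfs2_lookup_by_inum() even though the inode itself is already gone.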