Commit 65d6e954 authored by Linus Torvalds
Pull gfs2 updates from Andreas Gruenbacher:

 - Fix a glock state (non-)transition bug when a dlm request times out
   and is canceled, and we have locking requests that can now be granted
   immediately

 - Various fixes and cleanups in how the logd and quotad daemons are
   woken up and terminated

 - Fix several bugs in the quota data reference counting and shrinking.
   Free quota data objects synchronously in put_super() instead of
   letting call_rcu() run wild (a minimal sketch follows this list)

 - Make sure not to deallocate quota data during a withdraw; rather,
   defer quota data deallocation to put_super(). Withdraws can happen in
   contexts in which callers on the stack are holding quota data
   references

 - Many minor quota fixes and cleanups by Bob

 - Update the mailing list address for gfs2 and dlm. (It's the same
   list for both and we are moving it to gfs2@lists.linux.dev)

 - Various other minor cleanups
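
For reference, a minimal sketch of the synchronous-freeing idea from the
quota items above. The helper name is made up, though sd_quota_list,
qd_list, and gfs2_quotad_cachep are the real gfs2 names; the actual
implementation lives in fs/gfs2/quota.c:

	/*
	 * Hypothetical sketch: free the remaining quota data objects
	 * directly at put_super() time instead of queueing each one
	 * through call_rcu().
	 */
	static void example_free_quota_data_sync(struct gfs2_sbd *sdp)
	{
		struct gfs2_quota_data *qd, *tmp;

		/* Let RCU callbacks that were already queued finish first. */
		rcu_barrier();

		list_for_each_entry_safe(qd, tmp, &sdp->sd_quota_list, qd_list) {
			list_del(&qd->qd_list);
			kmem_cache_free(gfs2_quotad_cachep, qd);
		}
	}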

* tag 'gfs2-v6.5-rc5-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2: (51 commits)
  MAINTAINERS: Update dlm mailing list
  MAINTAINERS: Update gfs2 mailing list
  gfs2: change qd_slot_count to qd_slot_ref
  gfs2: check for no eligible quota changes
  gfs2: Remove useless assignment
  gfs2: simplify slot_get
  gfs2: Simplify qd2offset
  gfs2: introduce qd_bh_get_or_undo
  gfs2: Remove quota allocation info from quota file
  gfs2: use constant for array size
  gfs2: Set qd_sync_gen in do_sync
  gfs2: Remove useless err set
  gfs2: Small gfs2_quota_lock cleanup
  gfs2: move qdsb_put and reduce redundancy
  gfs2: improvements to sysfs status
  gfs2: Don't try to sync non-changes
  gfs2: Simplify function need_sync
  gfs2: remove unneeded pg_oflow variable
  gfs2: remove unneeded variable done
  gfs2: pass sdp to gfs2_write_buf_to_page
  ...
parents 9e310ea5 2938fd75
Documentation/filesystems/gfs2-glocks.rst  +1 −2
@@ -20,8 +20,7 @@ The gl_holders list contains all the queued lock requests (not
 just the holders) associated with the glock. If there are any
 held locks, then they will be contiguous entries at the head
 of the list. Locks are granted in strictly the order that they
-are queued, except for those marked LM_FLAG_PRIORITY which are
-used only during recovery, and even then only for journal locks.
+are queued.

 There are three lock states that users of the glock layer can request,
 namely shared (SH), deferred (DF) and exclusive (EX). Those translate
MAINTAINERS  +2 −2
@@ -6118,7 +6118,7 @@ F:	include/video/udlfb.h
 DISTRIBUTED LOCK MANAGER (DLM)
 M:	Christine Caulfield <ccaulfie@redhat.com>
 M:	David Teigland <teigland@redhat.com>
-L:	cluster-devel@redhat.com
+L:	gfs2@lists.linux.dev
 S:	Supported
 W:	http://sources.redhat.com/cluster/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/teigland/linux-dlm.git
@@ -8774,7 +8774,7 @@ F:	scripts/get_maintainer.pl
 GFS2 FILE SYSTEM
 M:	Bob Peterson <rpeterso@redhat.com>
 M:	Andreas Gruenbacher <agruenba@redhat.com>
-L:	cluster-devel@redhat.com
+L:	gfs2@lists.linux.dev
 S:	Supported
 B:	https://bugzilla.kernel.org/enter_bug.cgi?product=File%20System&component=gfs2
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2.git
fs/gfs2/aops.c  +3 −4
@@ -183,13 +183,13 @@ static int gfs2_writepages(struct address_space *mapping,
 	int ret;

 	/*
-	 * Even if we didn't write any pages here, we might still be holding
+	 * Even if we didn't write enough pages here, we might still be holding
 	 * dirty pages in the ail. We forcibly flush the ail because we don't
 	 * want balance_dirty_pages() to loop indefinitely trying to write out
 	 * pages held in the ail that it can't find.
 	 */
 	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
-	if (ret == 0)
+	if (ret == 0 && wbc->nr_to_write > 0)
 		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
 	return ret;
 }
@@ -272,8 +272,7 @@ static int gfs2_write_jdata_batch(struct address_space *mapping,
 				 * not be suitable for data integrity
 				 * writeout).
 				 */
-				*done_index = folio->index +
-					folio_nr_pages(folio);
+				*done_index = folio_next_index(folio);
 				ret = 1;
 				break;
 			}
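
Two notes on the hunks above: the forced AIL flush now only happens when
writeback stopped short of its nr_to_write budget (i.e., too few pages
were written), and the open-coded calculation of the first index after a
folio is replaced with the folio_next_index() helper, which is defined
in include/linux/pagemap.h as simply:

	static inline pgoff_t folio_next_index(struct folio *folio)
	{
		/* Index of the first page after this folio. */
		return folio->index + folio_nr_pages(folio);
	}
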
fs/gfs2/bmap.c  +1 −1
@@ -161,7 +161,7 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip)
 	int error;

 	down_write(&ip->i_rw_mutex);
-	page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
+	page = grab_cache_page(inode->i_mapping, 0);
 	error = -ENOMEM;
 	if (!page)
 		goto out;
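
grab_cache_page() is a thin shorthand defined in include/linux/pagemap.h
(shown below); the replacement is behavior-preserving on the assumption
that this mapping's gfp mask is GFP_NOFS, matching the flags the old
call passed explicitly:

	static inline struct page *grab_cache_page(struct address_space *mapping,
						   pgoff_t index)
	{
		/* Look up the page, allocating it with the mapping's mask. */
		return find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
	}
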
fs/gfs2/glock.c  +19 −28
@@ -176,7 +176,7 @@ void gfs2_glock_free(struct gfs2_glock *gl)
 	wake_up_glock(gl);
 	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
 	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
-		wake_up(&sdp->sd_glock_wait);
+		wake_up(&sdp->sd_kill_wait);
 }

 /**
@@ -468,10 +468,10 @@ int gfs2_instantiate(struct gfs2_holder *gh)
  * do_promote - promote as many requests as possible on the current queue
  * @gl: The glock
  *
- * Returns: 1 if there is a blocked holder at the head of the list
+ * Returns: true on success (i.e., progress was made or there are no waiters).
  */

-static int do_promote(struct gfs2_glock *gl)
+static bool do_promote(struct gfs2_glock *gl)
 {
 	struct gfs2_holder *gh, *current_gh;

@@ -484,10 +484,10 @@ static int do_promote(struct gfs2_glock *gl)
 			 * If we get here, it means we may not grant this
 			 * holder for some reason. If this holder is at the
 			 * head of the list, it means we have a blocked holder
-			 * at the head, so return 1.
+			 * at the head, so return false.
 			 */
 			if (list_is_first(&gh->gh_list, &gl->gl_holders))
-				return 1;
+				return false;
 			do_error(gl, 0);
 			break;
 		}
@@ -497,7 +497,7 @@ static int do_promote(struct gfs2_glock *gl)
 		if (!current_gh)
 			current_gh = gh;
 	}
-	return 0;
+	return true;
 }

 /**
@@ -591,10 +591,11 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
 			/* move to back of queue and try next entry */
 			if (ret & LM_OUT_CANCELED) {
-				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
-					list_move_tail(&gh->gh_list, &gl->gl_holders);
+				list_move_tail(&gh->gh_list, &gl->gl_holders);
 				gh = find_first_waiter(gl);
 				gl->gl_target = gh->gh_state;
+				if (do_promote(gl))
+					goto out;
 				goto retry;
 			}
 			/* Some error or failed "try lock" - report it */
@@ -679,8 +680,7 @@ __acquires(&gl->gl_lockref.lock)
 	    gh && !(gh->gh_flags & LM_FLAG_NOEXP))
 		goto skip_inval;

-	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
-		      LM_FLAG_PRIORITY);
+	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP);
 	GLOCK_BUG_ON(gl, gl->gl_state == target);
 	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
 	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
@@ -834,7 +834,7 @@ __acquires(&gl->gl_lockref.lock)
 	} else {
 		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
 			gfs2_demote_wake(gl);
-		if (do_promote(gl) == 0)
+		if (do_promote(gl))
 			goto out_unlock;
 		gh = find_first_waiter(gl);
 		gl->gl_target = gh->gh_state;
@@ -1022,7 +1022,7 @@ static void delete_work_func(struct work_struct *work)
 		 * step entirely.
 		 */
 		if (gfs2_try_evict(gl)) {
-			if (test_bit(SDF_DEACTIVATING, &sdp->sd_flags))
+			if (test_bit(SDF_KILL, &sdp->sd_flags))
 				goto out;
 			if (gfs2_queue_verify_evict(gl))
 				return;
@@ -1035,7 +1035,7 @@ static void delete_work_func(struct work_struct *work)
 					    GFS2_BLKST_UNLINKED);
 		if (IS_ERR(inode)) {
 			if (PTR_ERR(inode) == -EAGAIN &&
-			    !test_bit(SDF_DEACTIVATING, &sdp->sd_flags) &&
+			    !test_bit(SDF_KILL, &sdp->sd_flags) &&
 			    gfs2_queue_verify_evict(gl))
 				return;
 		} else {
@@ -1231,7 +1231,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 out_free:
 	gfs2_glock_dealloc(&gl->gl_rcu);
 	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
-		wake_up(&sdp->sd_glock_wait);
+		wake_up(&sdp->sd_kill_wait);

 out:
 	return ret;
@@ -1515,27 +1515,20 @@ __acquires(&gl->gl_lockref.lock)
 		}
 		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
 			continue;
-		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
-			insert_pt = &gh2->gh_list;
 	}
 	trace_gfs2_glock_queue(gh, 1);
 	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
 	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
-	if (likely(insert_pt == NULL)) {
-		list_add_tail(&gh->gh_list, &gl->gl_holders);
-		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
-			goto do_cancel;
-		return;
-	}
-	list_add_tail(&gh->gh_list, insert_pt);
-do_cancel:
-	gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
-	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
-		spin_unlock(&gl->gl_lockref.lock);
-		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
-			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
-		spin_lock(&gl->gl_lockref.lock);
-	}
+	list_add_tail(&gh->gh_list, &gl->gl_holders);
 	return;

 trap_recursive:
@@ -2195,7 +2188,7 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
 	flush_workqueue(glock_workqueue);
 	glock_hash_walk(clear_glock, sdp);
 	flush_workqueue(glock_workqueue);
-	wait_event_timeout(sdp->sd_glock_wait,
+	wait_event_timeout(sdp->sd_kill_wait,
 			   atomic_read(&sdp->sd_glock_disposal) == 0,
 			   HZ * 600);
 	glock_hash_walk(dump_glock_func, sdp);
@@ -2227,8 +2220,6 @@ static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
 		*p++ = 'e';
 	if (flags & LM_FLAG_ANY)
 		*p++ = 'A';
-	if (flags & LM_FLAG_PRIORITY)
-		*p++ = 'p';
 	if (flags & LM_FLAG_NODE_SCOPE)
 		*p++ = 'n';
 	if (flags & GL_ASYNC)
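
For reference, the LM_OUT_CANCELED handling from the finish_xmote() hunk
above, condensed with explanatory comments added (a sketch of the flow,
not the complete function):

	if (ret & LM_OUT_CANCELED) {
		/* The canceled request goes to the back of the queue and
		 * the first remaining waiter becomes the new target. */
		list_move_tail(&gh->gh_list, &gl->gl_holders);
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		/* Grant every request that is now compatible; only retry
		 * the dlm request if a waiter is still blocked. */
		if (do_promote(gl))
			goto out;
		goto retry;
	}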