Commit 94c76955 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull gfs2 updates from Andreas Gruenbacher:

 - Move the freeze/thaw logic from glock callback context to process /
   worker thread context to prevent deadlocks

 - Fix a quota reference counting bug in do_qc()

 - Carry on deallocating inodes even when gfs2_rindex_update() fails

 - Retry filesystem-internal reads when they are interrupted by a signal

 - Eliminate kmap_atomic() in favor of kmap_local_page() /
   memcpy_{from,to}_page()

 - Get rid of noop_direct_IO

 - And a few more minor fixes and cleanups

* tag 'gfs2-v6.4-rc5-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2: (23 commits)
  gfs2: Add quota_change type
  gfs2: Use memcpy_{from,to}_page where appropriate
  gfs2: Convert remaining kmap_atomic calls to kmap_local_page
  gfs2: Replace deprecated kmap_atomic with kmap_local_page
  gfs: Get rid of unnucessary locking in inode_go_dump
  gfs2: gfs2_freeze_lock_shared cleanup
  gfs2: Replace sd_freeze_state with SDF_FROZEN flag
  gfs2: Rework freeze / thaw logic
  gfs2: Rename SDF_{FS_FROZEN => FREEZE_INITIATOR}
  gfs2: Reconfiguring frozen filesystem already rejected
  gfs2: Rename gfs2_freeze_lock{ => _shared }
  gfs2: Rename the {freeze,thaw}_super callbacks
  gfs2: Rename remaining "transaction" glock references
  gfs2: retry interrupted internal reads
  gfs2: Fix possible data races in gfs2_show_options()
  gfs2: Fix duplicate should_fault_in_pages() call
  gfs2: set FMODE_CAN_ODIRECT instead of a dummy direct_IO method
  gfs2: Don't remember delete unless it's successful
  gfs2: Update rl_unlinked before releasing rgrp lock
  gfs2: Fix gfs2_qa_get imbalance in gfs2_quota_hold
  ...
parents ccf46d85 432928c9
Loading
Loading
Loading
Loading
+9 −10
Original line number Diff line number Diff line
@@ -432,10 +432,10 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	kaddr = kmap_local_page(page);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	kunmap_local(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);
@@ -489,18 +489,18 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(page)) {
			if (PTR_ERR(page) == -EINTR)
				continue;
			return PTR_ERR(page);
		}
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		memcpy_from_page(buf + copied, page, offset, amt);
		put_page(page);
		copied += amt;
		index++;
@@ -751,7 +751,6 @@ static const struct address_space_operations gfs2_aops = {
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.direct_IO = noop_direct_IO,
	.migrate_folio = filemap_migrate_folio,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
+2 −2
Original line number Diff line number Diff line
@@ -1729,8 +1729,8 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)

	if (offset >= maxsize) {
		/*
		 * The starting point lies beyond the allocated meta-data;
		 * there are no blocks do deallocate.
		 * The starting point lies beyond the allocated metadata;
		 * there are no blocks to deallocate.
		 */
		return 0;
	}
+4 −1
Original line number Diff line number Diff line
@@ -630,6 +630,9 @@ int gfs2_open_common(struct inode *inode, struct file *file)
		ret = generic_file_open(inode, file);
		if (ret)
			return ret;

		if (!gfs2_is_jdata(GFS2_I(inode)))
			file->f_mode |= FMODE_CAN_ODIRECT;
	}

	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
@@ -1030,8 +1033,8 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
	}

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, gh);
retry:
	if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
retry:
		window_size -= fault_in_iov_iter_readable(from, window_size);
		if (!window_size) {
			ret = -EFAULT;
+2 −2
Original line number Diff line number Diff line
@@ -145,8 +145,8 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu)
 *
 * We need to allow some glocks to be enqueued, dequeued, promoted, and demoted
 * when we're withdrawn. For example, to maintain metadata integrity, we should
 * disallow the use of inode and rgrp glocks when withdrawn. Other glocks, like
 * iopen or the transaction glocks may be safely used because none of their
 * disallow the use of inode and rgrp glocks when withdrawn. Other glocks like
 * the iopen or freeze glock may be safely used because none of their
 * metadata goes through the journal. So in general, we should disallow all
 * glocks that are journaled, and allow all the others. One exception is:
 * we need to allow our active journal to be promoted and demoted so others
+25 −44
Original line number Diff line number Diff line
@@ -236,7 +236,7 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
	truncate_inode_pages_range(mapping, start, end);
}

static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
static void gfs2_rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
			      const char *fs_id_buf)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;
@@ -536,72 +536,53 @@ static int inode_go_held(struct gfs2_holder *gh)
 *
 */

static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode;
	unsigned long nrpages;
	const struct inode *inode = &ip->i_inode;

	if (ip == NULL)
		return;

	inode = &ip->i_inode;
	xa_lock_irq(&inode->i_data.i_pages);
	nrpages = inode->i_data.nrpages;
	xa_unlock_irq(&inode->i_data.i_pages);

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		  (unsigned long long)ip->i_no_formal_ino,
		  (unsigned long long)ip->i_no_addr,
		  IF2DT(ip->i_inode.i_mode), ip->i_flags,
		  IF2DT(inode->i_mode), ip->i_flags,
		  (unsigned int)ip->i_diskflags,
		  (unsigned long long)i_size_read(inode), nrpages);
		  (unsigned long long)i_size_read(inode),
		  inode->i_data.nrpages);
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * freeze_go_callback - A cluster node is requesting a freeze
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 */

static int freeze_go_sync(struct gfs2_glock *gl)
static void freeze_go_callback(struct gfs2_glock *gl, bool remote)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct super_block *sb = sdp->sd_vfs;

	if (!remote ||
	    gl->gl_state != LM_ST_SHARED ||
	    gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	/*
	 * We need to check gl_state == LM_ST_SHARED here and not gl_req ==
	 * LM_ST_EXCLUSIVE. That's because when any node does a freeze,
	 * all the nodes should have the freeze glock in SH mode and they all
	 * call do_xmote: One for EX and the others for UN. They ALL must
	 * freeze locally, and they ALL must queue freeze work. The freeze_work
	 * calls freeze_func, which tries to reacquire the freeze glock in SH,
	 * effectively waiting for the thaw on the node who holds it in EX.
	 * Once thawed, the work func acquires the freeze glock in
	 * SH and everybody goes back to thawed.
	 * Try to get an active super block reference to prevent racing with
	 * unmount (see trylock_super()).  But note that unmount isn't the only
	 * place where a write lock on s_umount is taken, and we can fail here
	 * because of things like remount as well.
	 */
	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
	    !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			if (gfs2_withdrawn(sdp)) {
				atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
				return 0;
			}
			gfs2_assert_withdraw(sdp, 0);
	if (down_read_trylock(&sb->s_umount)) {
		atomic_inc(&sb->s_active);
		up_read(&sb->s_umount);
		if (!queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work))
			deactivate_super(sb);
	}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
				       GFS2_LFC_FREEZE_GO_SYNC);
		else /* read-only mounts */
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}
	return 0;
}

/**
@@ -761,9 +742,9 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_callback = freeze_go_callback,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};
Loading