Commit 2b73a2c8 authored by Darrick J. Wong

xfs: clear log incompat feature bits when the log is idle



When there are no ongoing transactions and the log contents have been
checkpointed back into the filesystem, the log performs 'covering',
which is to say that it logs a dummy transaction to record the fact that
the tail has caught up with the head.  This is a good time to clear log
incompat feature flags, because they are flags that are temporarily set
to limit the range of kernels that can replay a dirty log.

Since it's possible that some other higher level thread is about to
start logging items protected by a log incompat flag, we create an rwsem
so that upper level threads can coordinate this with the log.  It would
probably be more performant to use a percpu rwsem, but the ability to
/try/ taking the write lock during covering is critical, and percpu
rwsems do not provide that.
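
As an illustration only (not part of this patch), a higher level thread that
logs items protected by a log incompat flag might coordinate with covering
roughly as sketched below; xfs_do_incompat_work(),
xfs_add_incompat_log_feature() and the specific feature flag are assumed
names for whatever caller and helper set the on-disk bit:

/*
 * Sketch of a hypothetical caller.  Holding the read lock keeps log
 * covering from clearing the incompat bit while we still need it.
 */
int
xfs_do_incompat_work(
	struct xfs_mount	*mp)
{
	int			error;

	xlog_use_incompat_feat(mp->m_log);

	/* Set the on-disk incompat bit (assumed helper, not in this patch). */
	error = xfs_add_incompat_log_feature(mp,
			XFS_SB_FEAT_INCOMPAT_LOG_EXAMPLE);
	if (error)
		goto out_unlock;

	/* ... run the transactions that log the protected items ... */

out_unlock:
	xlog_drop_incompat_feat(mp->m_log);
	return error;
}

The read lock nests with the trylock in xlog_clear_incompat() below: covering
only clears the bits when no such caller holds the semaphore.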

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Allison Henderson <allison.henderson@oracle.com>
Reviewed-by: Chandan Babu R <chandanrlinux@gmail.com>
parent 908ce71e
fs/xfs/xfs_log.c  +49 −0
@@ -1362,6 +1362,32 @@ xfs_log_work_queue(
				msecs_to_jiffies(xfs_syncd_centisecs * 10));
}

/*
 * Clear the log incompat flags if we have the opportunity.
 *
 * This only happens if we're about to log the second dummy transaction as part
 * of covering the log and we can get the log incompat feature usage lock.
 */
static inline void
xlog_clear_incompat(
	struct xlog		*log)
{
	struct xfs_mount	*mp = log->l_mp;

	if (!xfs_sb_has_incompat_log_feature(&mp->m_sb,
				XFS_SB_FEAT_INCOMPAT_LOG_ALL))
		return;

	if (log->l_covered_state != XLOG_STATE_COVER_DONE2)
		return;

	if (!down_write_trylock(&log->l_incompat_users))
		return;

	xfs_clear_incompat_log_features(mp);
	up_write(&log->l_incompat_users);
}

/*
 * Every sync period we need to unpin all items in the AIL and push them to
 * disk. If there is nothing dirty, then we might need to cover the log to
@@ -1388,6 +1414,7 @@ xfs_log_worker(
		 * synchronously log the superblock instead to ensure the
		 * superblock is immediately unpinned and can be written back.
		 */
		xlog_clear_incompat(log);
		xfs_sync_sb(mp, true);
	} else
		xfs_log_force(mp, 0);
@@ -1475,6 +1502,8 @@ xlog_alloc_log(
	}
	log->l_sectBBsize = 1 << log2_size;

	init_rwsem(&log->l_incompat_users);

	xlog_get_iclog_buffer_size(mp, log);

	spin_lock_init(&log->l_icloglock);
@@ -3973,3 +4002,23 @@ xfs_log_in_recovery(

	return log->l_flags & XLOG_ACTIVE_RECOVERY;
}

/*
 * Notify the log that we're about to start using a feature that is protected
 * by a log incompat feature flag.  This will prevent log covering from
 * clearing those flags.
 */
void
xlog_use_incompat_feat(
	struct xlog		*log)
{
	down_read(&log->l_incompat_users);
}

/* Notify the log that we've finished using log incompat features. */
void
xlog_drop_incompat_feat(
	struct xlog		*log)
{
	up_read(&log->l_incompat_users);
}
fs/xfs/xfs_log.h  +3 −0
@@ -142,4 +142,7 @@ bool xfs_log_in_recovery(struct xfs_mount *);

xfs_lsn_t xlog_grant_push_threshold(struct xlog *log, int need_bytes);

void xlog_use_incompat_feat(struct xlog *log);
void xlog_drop_incompat_feat(struct xlog *log);

#endif	/* __XFS_LOG_H__ */
fs/xfs/xfs_log_priv.h  +3 −0
@@ -456,6 +456,9 @@ struct xlog {
	xfs_lsn_t		l_recovery_lsn;

	uint32_t		l_iclog_roundoff;/* padding roundoff */

	/* Users of log incompat features should take a read lock. */
	struct rw_semaphore	l_incompat_users;
};

#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \