Unverified Commit 7fb44d00 authored by openeuler-ci-bot, committed by Gitee
Browse files

!1388 [sync] PR-1312: quota: fix race condition between dqput() and dquot_mark_dquot_dirty()

Merge Pull Request from: @openeuler-sync-bot 
 

Origin pull request: 
https://gitee.com/openeuler/kernel/pulls/1312 
 
PR sync from: Baokun Li <libaokun1@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/7ATD3RNUBURBEYA34VGOOZB53J377OZQ/ 
Baokun Li (5):
  quota: factor out dquot_write_dquot()
  quota: rename dquot_active() to inode_quota_active()
  quota: add new helper dquot_active()
  quota: fix dqput() to follow the guarantees dquot_srcu should provide
  quota: simplify drop_dquot_ref()


-- 
2.31.1
 
 
Link: https://gitee.com/openeuler/kernel/pulls/1388

 

Reviewed-by: Jialin Zhang <zhangjialin11@huawei.com>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
parents ea8877a9 654ee4fc
Loading
Loading
Loading
Loading
+125 −119
Original line number Diff line number Diff line
@@ -225,13 +225,22 @@ static void put_quota_format(struct quota_format_type *fmt)

/*
 * Dquot List Management:
 * The quota code uses four lists for dquot management: the inuse_list,
 * free_dquots, dqi_dirty_list, and dquot_hash[] array. A single dquot
 * structure may be on some of those lists, depending on its current state.
 * The quota code uses five lists for dquot management: the inuse_list,
 * releasing_dquots, free_dquots, dqi_dirty_list, and dquot_hash[] array.
 * A single dquot structure may be on some of those lists, depending on
 * its current state.
 *
 * All dquots are placed to the end of inuse_list when first created, and this
 * list is used for invalidate operation, which must look at every dquot.
 *
 * When the last reference of a dquot will be dropped, the dquot will be
 * added to releasing_dquots. We'd then queue work item which would call
 * synchronize_srcu() and after that perform the final cleanup of all the
 * dquots on the list. Both releasing_dquots and free_dquots use the
 * dq_free list_head in the dquot struct. When a dquot is removed from
 * releasing_dquots, a reference count is always subtracted, and if
 * dq_count == 0 at that point, the dquot will be added to the free_dquots.
 *
 * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
 * and this list is searched whenever we need an available dquot.  Dquots are
 * removed from the list as soon as they are used again, and
@@ -250,6 +259,7 @@ static void put_quota_format(struct quota_format_type *fmt)

static LIST_HEAD(inuse_list);
static LIST_HEAD(free_dquots);
static LIST_HEAD(releasing_dquots);
static unsigned int dq_hash_bits, dq_hash_mask;
static struct hlist_head *dquot_hash;

@@ -260,6 +270,9 @@ static qsize_t inode_get_rsv_space(struct inode *inode);
static qsize_t __inode_get_rsv_space(struct inode *inode);
static int __dquot_initialize(struct inode *inode, int type);

static void quota_release_workfn(struct work_struct *work);
static DECLARE_DELAYED_WORK(quota_release_work, quota_release_workfn);

static inline unsigned int
hashfn(const struct super_block *sb, struct kqid qid)
{
@@ -307,11 +320,17 @@ static inline void put_dquot_last(struct dquot *dquot)
	dqstats_inc(DQST_FREE_DQUOTS);
}

static inline void put_releasing_dquots(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &releasing_dquots);
}

static inline void remove_free_dquot(struct dquot *dquot)
{
	if (list_empty(&dquot->dq_free))
		return;
	list_del_init(&dquot->dq_free);
	if (!atomic_read(&dquot->dq_count))
		dqstats_dec(DQST_FREE_DQUOTS);
}

@@ -338,6 +357,11 @@ static void wait_on_dquot(struct dquot *dquot)
	mutex_unlock(&dquot->dq_lock);
}

static inline int dquot_active(struct dquot *dquot)
{
	return test_bit(DQ_ACTIVE_B, &dquot->dq_flags);
}

static inline int dquot_dirty(struct dquot *dquot)
{
	return test_bit(DQ_MOD_B, &dquot->dq_flags);
@@ -353,14 +377,14 @@ int dquot_mark_dquot_dirty(struct dquot *dquot)
{
	int ret = 1;

	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
	if (!dquot_active(dquot))
		return 0;

	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
		return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);

	/* If quota is dirty already, we don't have to acquire dq_list_lock */
	if (test_bit(DQ_MOD_B, &dquot->dq_flags))
	if (dquot_dirty(dquot))
		return 1;

	spin_lock(&dq_list_lock);
@@ -442,7 +466,7 @@ int dquot_acquire(struct dquot *dquot)
	smp_mb__before_atomic();
	set_bit(DQ_READ_B, &dquot->dq_flags);
	/* Instantiate dquot if needed */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
	if (!dquot_active(dquot) && !dquot->dq_off) {
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
		/* Write the info if needed */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
@@ -484,7 +508,7 @@ int dquot_commit(struct dquot *dquot)
		goto out_lock;
	/* Inactive dquot can be only if there was error during read/init
	 * => we have better not writing it */
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
	if (dquot_active(dquot))
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
	else
		ret = -EIO;
@@ -549,6 +573,8 @@ static void invalidate_dquots(struct super_block *sb, int type)
	struct dquot *dquot, *tmp;

restart:
	flush_delayed_work(&quota_release_work);

	spin_lock(&dq_list_lock);
	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
		if (dquot->dq_sb != sb)
@@ -557,6 +583,12 @@ static void invalidate_dquots(struct super_block *sb, int type)
			continue;
		/* Wait for dquot users */
		if (atomic_read(&dquot->dq_count)) {
			/* dquot in releasing_dquots, flush and retry */
			if (!list_empty(&dquot->dq_free)) {
				spin_unlock(&dq_list_lock);
				goto restart;
			}

			dqgrab(dquot);
			spin_unlock(&dq_list_lock);
			/*
@@ -599,7 +631,7 @@ int dquot_scan_active(struct super_block *sb,

	spin_lock(&dq_list_lock);
	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
		if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
		if (!dquot_active(dquot))
			continue;
		if (dquot->dq_sb != sb)
			continue;
@@ -614,7 +646,7 @@ int dquot_scan_active(struct super_block *sb,
		 * outstanding call and recheck the DQ_ACTIVE_B after that.
		 */
		wait_on_dquot(dquot);
		if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
		if (dquot_active(dquot)) {
			ret = fn(dquot, priv);
			if (ret < 0)
				goto out;
@@ -630,6 +662,18 @@ int dquot_scan_active(struct super_block *sb,
}
EXPORT_SYMBOL(dquot_scan_active);

static inline int dquot_write_dquot(struct dquot *dquot)
{
	int ret = dquot->dq_sb->dq_op->write_dquot(dquot);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't write quota structure "
			    "(error %d). Quota may get out of sync!", ret);
		/* Clear dirty bit anyway to avoid infinite loop. */
		clear_dquot_dirty(dquot);
	}
	return ret;
}

/* Write all dquot structures to quota files */
int dquot_writeback_dquots(struct super_block *sb, int type)
{
@@ -653,23 +697,16 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
			dquot = list_first_entry(&dirty, struct dquot,
						 dq_dirty);

			WARN_ON(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags));
			WARN_ON(!dquot_active(dquot));

			/* Now we have active dquot from which someone is
 			 * holding reference so we can safely just increase
			 * use count */
			dqgrab(dquot);
			spin_unlock(&dq_list_lock);
			err = sb->dq_op->write_dquot(dquot);
			if (err) {
				/*
				 * Clear dirty bit anyway to avoid infinite
				 * loop here.
				 */
				clear_dquot_dirty(dquot);
				if (!ret)
			err = dquot_write_dquot(dquot);
			if (err && !ret)
				ret = err;
			}
			dqput(dquot);
			spin_lock(&dq_list_lock);
		}
@@ -762,13 +799,54 @@ static struct shrinker dqcache_shrinker = {
	.seeks = DEFAULT_SEEKS,
};

/*
 * Safely release dquot and put reference to dquot.
 */
static void quota_release_workfn(struct work_struct *work)
{
	struct dquot *dquot;
	struct list_head rls_head;

	spin_lock(&dq_list_lock);
	/* Exchange the list head to avoid livelock. */
	list_replace_init(&releasing_dquots, &rls_head);
	spin_unlock(&dq_list_lock);

restart:
	synchronize_srcu(&dquot_srcu);
	spin_lock(&dq_list_lock);
	while (!list_empty(&rls_head)) {
		dquot = list_first_entry(&rls_head, struct dquot, dq_free);
		/* Dquot got used again? */
		if (atomic_read(&dquot->dq_count) > 1) {
			remove_free_dquot(dquot);
			atomic_dec(&dquot->dq_count);
			continue;
		}
		if (dquot_dirty(dquot)) {
			spin_unlock(&dq_list_lock);
			/* Commit dquot before releasing */
			dquot_write_dquot(dquot);
			goto restart;
		}
		if (dquot_active(dquot)) {
			spin_unlock(&dq_list_lock);
			dquot->dq_sb->dq_op->release_dquot(dquot);
			goto restart;
		}
		/* Dquot is inactive and clean, now move it to free list */
		remove_free_dquot(dquot);
		atomic_dec(&dquot->dq_count);
		put_dquot_last(dquot);
	}
	spin_unlock(&dq_list_lock);
}

/*
 * Put reference to dquot
 */
void dqput(struct dquot *dquot)
{
	int ret;

	if (!dquot)
		return;
#ifdef CONFIG_QUOTA_DEBUG
@@ -780,7 +858,7 @@ void dqput(struct dquot *dquot)
	}
#endif
	dqstats_inc(DQST_DROPS);
we_slept:

	spin_lock(&dq_list_lock);
	if (atomic_read(&dquot->dq_count) > 1) {
		/* We have more than one user... nothing to do */
@@ -792,35 +870,15 @@ void dqput(struct dquot *dquot)
		spin_unlock(&dq_list_lock);
		return;
	}

	/* Need to release dquot? */
	if (dquot_dirty(dquot)) {
		spin_unlock(&dq_list_lock);
		/* Commit dquot before releasing */
		ret = dquot->dq_sb->dq_op->write_dquot(dquot);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't write quota structure"
				    " (error %d). Quota may get out of sync!",
				    ret);
			/*
			 * We clear dirty bit anyway, so that we avoid
			 * infinite loop here
			 */
			clear_dquot_dirty(dquot);
		}
		goto we_slept;
	}
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
		spin_unlock(&dq_list_lock);
		dquot->dq_sb->dq_op->release_dquot(dquot);
		goto we_slept;
	}
	atomic_dec(&dquot->dq_count);
#ifdef CONFIG_QUOTA_DEBUG
	/* sanity check */
	BUG_ON(!list_empty(&dquot->dq_free));
#endif
	put_dquot_last(dquot);
	put_releasing_dquots(dquot);
	spin_unlock(&dq_list_lock);
	queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
}
EXPORT_SYMBOL(dqput);

@@ -910,7 +968,7 @@ struct dquot *dqget(struct super_block *sb, struct kqid qid)
	 * already finished or it will be canceled due to dq_count > 1 test */
	wait_on_dquot(dquot);
	/* Read the dquot / allocate space in quota file */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
	if (!dquot_active(dquot)) {
		int err;

		err = sb->dq_op->acquire_dquot(dquot);
@@ -1016,59 +1074,7 @@ static int add_dquot_ref(struct super_block *sb, int type)
	return err;
}

/*
 * Remove references to dquots from inode and add dquot to list for freeing
 * if we have the last reference to dquot
 */
static void remove_inode_dquot_ref(struct inode *inode, int type,
				   struct list_head *tofree_head)
{
	struct dquot **dquots = i_dquot(inode);
	struct dquot *dquot = dquots[type];

	if (!dquot)
		return;

	dquots[type] = NULL;
	if (list_empty(&dquot->dq_free)) {
		/*
		 * The inode still has reference to dquot so it can't be in the
		 * free list
		 */
		spin_lock(&dq_list_lock);
		list_add(&dquot->dq_free, tofree_head);
		spin_unlock(&dq_list_lock);
	} else {
		/*
		 * Dquot is already in a list to put so we won't drop the last
		 * reference here.
		 */
		dqput(dquot);
	}
}

/*
 * Free list of dquots
 * Dquots are removed from inodes and no new references can be got so we are
 * the only ones holding reference
 */
static void put_dquot_list(struct list_head *tofree_head)
{
	struct list_head *act_head;
	struct dquot *dquot;

	act_head = tofree_head->next;
	while (act_head != tofree_head) {
		dquot = list_entry(act_head, struct dquot, dq_free);
		act_head = act_head->next;
		/* Remove dquot from the list so we won't have problems... */
		list_del_init(&dquot->dq_free);
		dqput(dquot);
	}
}

static void remove_dquot_ref(struct super_block *sb, int type,
		struct list_head *tofree_head)
static void remove_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode;
#ifdef CONFIG_QUOTA_DEBUG
@@ -1085,11 +1091,16 @@ static void remove_dquot_ref(struct super_block *sb, int type,
		 */
		spin_lock(&dq_data_lock);
		if (!IS_NOQUOTA(inode)) {
			struct dquot **dquots = i_dquot(inode);
			struct dquot *dquot = dquots[type];

#ifdef CONFIG_QUOTA_DEBUG
			if (unlikely(inode_get_rsv_space(inode) > 0))
				reserved = 1;
#endif
			remove_inode_dquot_ref(inode, type, tofree_head);
			dquots[type] = NULL;
			if (dquot)
				dqput(dquot);
		}
		spin_unlock(&dq_data_lock);
	}
@@ -1106,13 +1117,8 @@ static void remove_dquot_ref(struct super_block *sb, int type,
/* Gather all references from inodes and drop them */
static void drop_dquot_ref(struct super_block *sb, int type)
{
	LIST_HEAD(tofree_head);

	if (sb->dq_op) {
		remove_dquot_ref(sb, type, &tofree_head);
		synchronize_srcu(&dquot_srcu);
		put_dquot_list(&tofree_head);
	}
	if (sb->dq_op)
		remove_dquot_ref(sb, type);
}

static inline
@@ -1427,7 +1433,7 @@ static int info_bdq_free(struct dquot *dquot, qsize_t space)
	return QUOTA_NL_NOWARN;
}

static int dquot_active(const struct inode *inode)
static int inode_quota_active(const struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

@@ -1450,7 +1456,7 @@ static int __dquot_initialize(struct inode *inode, int type)
	qsize_t rsv;
	int ret = 0;

	if (!dquot_active(inode))
	if (!inode_quota_active(inode))
		return 0;

	dquots = i_dquot(inode);
@@ -1558,7 +1564,7 @@ bool dquot_initialize_needed(struct inode *inode)
	struct dquot **dquots;
	int i;

	if (!dquot_active(inode))
	if (!inode_quota_active(inode))
		return false;

	dquots = i_dquot(inode);
@@ -1669,7 +1675,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
	int reserve = flags & DQUOT_SPACE_RESERVE;
	struct dquot **dquots;

	if (!dquot_active(inode)) {
	if (!inode_quota_active(inode)) {
		if (reserve) {
			spin_lock(&inode->i_lock);
			*inode_reserved_space(inode) += number;
@@ -1739,7 +1745,7 @@ int dquot_alloc_inode(struct inode *inode)
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot * const *dquots;

	if (!dquot_active(inode))
	if (!inode_quota_active(inode))
		return 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;
@@ -1782,7 +1788,7 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
	struct dquot **dquots;
	int cnt, index;

	if (!dquot_active(inode)) {
	if (!inode_quota_active(inode)) {
		spin_lock(&inode->i_lock);
		*inode_reserved_space(inode) -= number;
		__inode_add_bytes(inode, number);
@@ -1824,7 +1830,7 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
	struct dquot **dquots;
	int cnt, index;

	if (!dquot_active(inode)) {
	if (!inode_quota_active(inode)) {
		spin_lock(&inode->i_lock);
		*inode_reserved_space(inode) += number;
		__inode_sub_bytes(inode, number);
@@ -1868,7 +1874,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
	struct dquot **dquots;
	int reserve = flags & DQUOT_SPACE_RESERVE, index;

	if (!dquot_active(inode)) {
	if (!inode_quota_active(inode)) {
		if (reserve) {
			spin_lock(&inode->i_lock);
			*inode_reserved_space(inode) -= number;
@@ -1923,7 +1929,7 @@ void dquot_free_inode(struct inode *inode)
	struct dquot * const *dquots;
	int index;

	if (!dquot_active(inode))
	if (!inode_quota_active(inode))
		return;

	dquots = i_dquot(inode);
@@ -2094,7 +2100,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
	struct super_block *sb = inode->i_sb;
	int ret;

	if (!dquot_active(inode))
	if (!inode_quota_active(inode))
		return 0;

	if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)){