Commit 138a12d8 authored by Josef Bacik, committed by David Sterba
Browse files

btrfs: rip out btrfs_space_info::total_bytes_pinned



We used this in may_commit_transaction() in order to determine if we
needed to commit the transaction.  However we no longer have that logic
and thus have no use of this counter anymore, so delete it.

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 3ffad696
Loading
Loading
Loading
Loading
+0 −3
Original line number Diff line number Diff line
@@ -1399,7 +1399,6 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
		btrfs_space_info_update_bytes_pinned(fs_info, space_info,
						     -block_group->pinned);
		space_info->bytes_readonly += block_group->pinned;
		__btrfs_mod_total_bytes_pinned(space_info, -block_group->pinned);
		block_group->pinned = 0;

		spin_unlock(&block_group->lock);
@@ -3068,8 +3067,6 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

			__btrfs_mod_total_bytes_pinned(cache->space_info,
						       num_bytes);
			set_extent_dirty(&trans->transaction->pinned_extents,
					 bytenr, bytenr + num_bytes - 1,
					 GFP_NOFS | __GFP_NOFAIL);
+0 −26
Original line number Diff line number Diff line
@@ -641,7 +641,6 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
	struct btrfs_delayed_ref_root *delayed_refs =
		&trans->transaction->delayed_refs;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u64 flags = btrfs_ref_head_to_space_flags(existing);
	int old_ref_mod;

	BUG_ON(existing->is_data != update->is_data);
@@ -711,26 +710,6 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
		}
	}

	/*
	 * This handles the following conditions:
	 *
	 * 1. We had a ref mod of 0 or more and went negative, indicating that
	 *    we may be freeing space, so add our space to the
	 *    total_bytes_pinned counter.
	 * 2. We were negative and went to 0 or positive, so no longer can say
	 *    that the space would be pinned, decrement our counter from the
	 *    total_bytes_pinned counter.
	 * 3. We are now at 0 and have ->must_insert_reserved set, which means
	 *    this was a new allocation and then we dropped it, and thus must
	 *    add our space to the total_bytes_pinned counter.
	 */
	if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
		btrfs_mod_total_bytes_pinned(fs_info, flags, existing->num_bytes);
	else if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
		btrfs_mod_total_bytes_pinned(fs_info, flags, -existing->num_bytes);
	else if (existing->total_ref_mod == 0 && existing->must_insert_reserved)
		btrfs_mod_total_bytes_pinned(fs_info, flags, existing->num_bytes);

	spin_unlock(&existing->lock);
}

@@ -835,17 +814,12 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		u64 flags = btrfs_ref_head_to_space_flags(head_ref);

		if (head_ref->is_data && head_ref->ref_mod < 0) {
			delayed_refs->pending_csums += head_ref->num_bytes;
			trans->delayed_ref_updates +=
				btrfs_csum_bytes_to_leaves(trans->fs_info,
							   head_ref->num_bytes);
		}
		if (head_ref->ref_mod < 0)
			btrfs_mod_total_bytes_pinned(trans->fs_info, flags,
						     head_ref->num_bytes);
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
+0 −3
Original line number Diff line number Diff line
@@ -4680,9 +4680,6 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
			cache->space_info->bytes_reserved -= head->num_bytes;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			percpu_counter_add_batch(
				&cache->space_info->total_bytes_pinned,
				head->num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);

			btrfs_put_block_group(cache);

+0 −15
Original line number Diff line number Diff line
@@ -1804,19 +1804,6 @@ void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
		nr_items += btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes);
	}

	/*
	 * We were dropping refs, or had a new ref and dropped it, and thus must
	 * adjust down our total_bytes_pinned, the space may or may not have
	 * been pinned and so is accounted for properly in the pinned space by
	 * now.
	 */
	if (head->total_ref_mod < 0 ||
	    (head->total_ref_mod == 0 && head->must_insert_reserved)) {
		u64 flags = btrfs_ref_head_to_space_flags(head);

		btrfs_mod_total_bytes_pinned(fs_info, flags, -head->num_bytes);
	}

	btrfs_delayed_refs_rsv_release(fs_info, nr_items);
}

@@ -2551,7 +2538,6 @@ static int pin_down_extent(struct btrfs_trans_handle *trans,
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);

	__btrfs_mod_total_bytes_pinned(cache->space_info, num_bytes);
	set_extent_dirty(&trans->transaction->pinned_extents, bytenr,
			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
	return 0;
@@ -2762,7 +2748,6 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
		cache->pinned -= len;
		btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len);
		space_info->max_extent_size = 0;
		__btrfs_mod_total_bytes_pinned(space_info, -len);
		if (cache->ro) {
			space_info->bytes_readonly += len;
			readonly = true;
+0 −7
Original line number Diff line number Diff line
@@ -192,13 +192,6 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags)
	if (!space_info)
		return -ENOMEM;

	ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
				 GFP_KERNEL);
	if (ret) {
		kfree(space_info);
		return ret;
	}

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
Loading