Commit 6fdf8864 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull btrfs fixes from David Sterba:
 "Several fixes and one old ioctl deprecation. Namely there's fix for
  crashes/warnings with lzo compression that was suspected to be caused
  by first pull merge resolution, but it was a different bug.

  Summary:

   - regression fix for a crash in lzo due to missing boundary checks of
     the page array

   - fix crashes on ARM64 due to missing barriers when synchronizing
     status bits between work queues

   - silence lockdep when reading chunk tree during mount

   - fix false positive warning in integrity checker on devices with
     disabled write caching

   - fix signedness of bitfields in scrub

   - start deprecation of balance v1 ioctl"

* tag 'for-5.16-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: deprecate BTRFS_IOC_BALANCE ioctl
  btrfs: make 1-bit bit-fields of scrub_page unsigned int
  btrfs: check-integrity: fix a warning on write caching disabled disk
  btrfs: silence lockdep when reading chunk tree during mount
  btrfs: fix memory ordering between normal and ordered work functions
  btrfs: fix a out-of-bound access in copy_compressed_data_to_page()
parents db850a9b 6c405b24
Loading
Loading
Loading
Loading
+14 −0
Original line number Diff line number Diff line
@@ -234,6 +234,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;
		/*
		 * Orders all subsequent loads after reading WORK_DONE_BIT,
		 * paired with the smp_mb__before_atomic in btrfs_work_helper;
		 * this guarantees that the ordered function will see all
		 * updates from ordinary work function.
		 */
		smp_rmb();

		/*
		 * we are going to call the ordered done function, but
@@ -317,6 +324,13 @@ static void btrfs_work_helper(struct work_struct *normal_work)
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		/*
		 * Ensures all memory accesses done in the work function are
		 * ordered before setting the WORK_DONE_BIT. Ensuring the thread
		 * which is going to execute the ordered work sees them.
		 * Pairs with the smp_rmb in run_ordered_work.
		 */
		smp_mb__before_atomic();
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq, work);
	} else {
+13 −1
Original line number Diff line number Diff line
@@ -3978,11 +3978,23 @@ static void btrfs_end_empty_barrier(struct bio *bio)
 */
static void write_dev_flush(struct btrfs_device *device)
{
	struct request_queue *q = bdev_get_queue(device->bdev);
	struct bio *bio = device->flush_bio;

#ifndef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	/*
	 * When a disk has write caching disabled, we skip submission of a bio
	 * with flush and sync requests before writing the superblock, since
	 * it's not needed. However when the integrity checker is enabled, this
	 * results in reports that there are metadata blocks referred by a
	 * superblock that were not properly flushed. So don't skip the bio
	 * submission only when the integrity checker is enabled for the sake
	 * of simplicity, since this is a debug tool and not meant for use in
	 * non-debug builds.
	 */
	struct request_queue *q = bdev_get_queue(device->bdev);
	if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return;
#endif

	bio_reset(bio);
	bio->bi_end_io = btrfs_end_empty_barrier;
+4 −0
Original line number Diff line number Diff line
@@ -3985,6 +3985,10 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
	bool need_unlock; /* for mut. excl. ops lock */
	int ret;

	if (!arg)
		btrfs_warn(fs_info,
	"IOC_BALANCE ioctl (v1) is deprecated and will be removed in kernel 5.18");

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

+12 −1
Original line number Diff line number Diff line
@@ -125,6 +125,7 @@ static inline size_t read_compress_length(const char *buf)
static int copy_compressed_data_to_page(char *compressed_data,
					size_t compressed_size,
					struct page **out_pages,
					unsigned long max_nr_page,
					u32 *cur_out,
					const u32 sectorsize)
{
@@ -133,6 +134,9 @@ static int copy_compressed_data_to_page(char *compressed_data,
	struct page *cur_page;
	char *kaddr;

	if ((*cur_out / PAGE_SIZE) >= max_nr_page)
		return -E2BIG;

	/*
	 * We never allow a segment header crossing sector boundary, previous
	 * run should ensure we have enough space left inside the sector.
@@ -161,6 +165,10 @@ static int copy_compressed_data_to_page(char *compressed_data,
				     orig_out + compressed_size - *cur_out);

		kunmap(cur_page);

		if ((*cur_out / PAGE_SIZE) >= max_nr_page)
			return -E2BIG;

		cur_page = out_pages[*cur_out / PAGE_SIZE];
		/* Allocate a new page */
		if (!cur_page) {
@@ -203,6 +211,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
	const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize;
	struct page *page_in = NULL;
	char *sizes_ptr;
	const unsigned long max_nr_page = *out_pages;
	int ret = 0;
	/* Points to the file offset of input data */
	u64 cur_in = start;
@@ -210,6 +219,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
	u32 cur_out = 0;
	u32 len = *total_out;

	ASSERT(max_nr_page > 0);
	*out_pages = 0;
	*total_out = 0;
	*total_in = 0;
@@ -248,7 +258,8 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
		}

		ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
						   pages, &cur_out, sectorsize);
						   pages, max_nr_page,
						   &cur_out, sectorsize);
		if (ret < 0)
			goto out;

+2 −2
Original line number Diff line number Diff line
@@ -73,8 +73,8 @@ struct scrub_page {
	u64			physical_for_dev_replace;
	atomic_t		refs;
	u8			mirror_num;
	int			have_csum:1;
	int			io_error:1;
	unsigned int		have_csum:1;
	unsigned int		io_error:1;
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
Loading