Unverified Commit c380b52f authored by Konstantin Komarov
Browse files

fs/ntfs3: Change new sparse cluster processing



Remove ntfs_sparse_cluster.
Zero clusters in attr_allocate_clusters.
Fixes xfstest generic/263

Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
parent 2f56a3f8
Loading
Loading
Loading
Loading
+122 −54
Original line number Original line Diff line number Diff line
@@ -149,7 +149,7 @@ static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
			   CLST *new_lcn)
			   CLST *new_lcn, CLST *new_len)
{
{
	int err;
	int err;
	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
@@ -169,20 +169,36 @@ int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
		if (err)
		if (err)
			goto out;
			goto out;


		if (new_lcn && vcn == vcn0)
		if (vcn == vcn0) {
			/* Return the first fragment. */
			if (new_lcn)
				*new_lcn = lcn;
				*new_lcn = lcn;
			if (new_len)
				*new_len = flen;
		}


		/* Add new fragment into run storage. */
		/* Add new fragment into run storage. */
		if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
		if (!run_add_entry(run, vcn, lcn, flen, opt & ALLOCATE_MFT)) {
			/* Undo last 'ntfs_look_for_free_space' */
			/* Undo last 'ntfs_look_for_free_space' */
			mark_as_free_ex(sbi, lcn, len, false);
			mark_as_free_ex(sbi, lcn, len, false);
			err = -ENOMEM;
			err = -ENOMEM;
			goto out;
			goto out;
		}
		}


		if (opt & ALLOCATE_ZERO) {
			u8 shift = sbi->cluster_bits - SECTOR_SHIFT;

			err = blkdev_issue_zeroout(sbi->sb->s_bdev,
						   (sector_t)lcn << shift,
						   (sector_t)flen << shift,
						   GFP_NOFS, 0);
			if (err)
				goto out;
		}

		vcn += flen;
		vcn += flen;


		if (flen >= len || opt == ALLOCATE_MFT ||
		if (flen >= len || (opt & ALLOCATE_MFT) ||
		    (fr && run->count - cnt >= fr)) {
		    (fr && run->count - cnt >= fr)) {
			*alen = vcn - vcn0;
			*alen = vcn - vcn0;
			return 0;
			return 0;
@@ -257,7 +273,8 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
		const char *data = resident_data(attr);
		const char *data = resident_data(attr);


		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
					     ALLOCATE_DEF, &alen, 0, NULL);
					     ALLOCATE_DEF, &alen, 0, NULL,
					     NULL);
		if (err)
		if (err)
			goto out1;
			goto out1;


@@ -552,13 +569,13 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
			/* ~3 bytes per fragment. */
			/* ~3 bytes per fragment. */
			err = attr_allocate_clusters(
			err = attr_allocate_clusters(
				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
				is_mft ? ALLOCATE_MFT : 0, &alen,
				is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
				is_mft ? 0
				is_mft ? 0
				       : (sbi->record_size -
				       : (sbi->record_size -
					  le32_to_cpu(rec->used) + 8) /
					  le32_to_cpu(rec->used) + 8) /
							 3 +
							 3 +
						 1,
						 1,
				NULL);
				NULL, NULL);
			if (err)
			if (err)
				goto out;
				goto out;
		}
		}
@@ -855,8 +872,19 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
	return err;
	return err;
}
}


/*
 * attr_data_get_block - Returns 'lcn' and 'len' for given 'vcn'.
 *
 * @new == NULL means just to get current mapping for 'vcn'
 * @new != NULL means allocate real cluster if 'vcn' maps to hole
 * @zero - zero out newly allocated clusters
 *
 *  NOTE:
 *  - @new != NULL is called only for sparsed or compressed attributes.
 *  - newly allocated clusters are zeroed via blkdev_issue_zeroout.
 */
int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
			CLST *len, bool *new)
			CLST *len, bool *new, bool zero)
{
{
	int err = 0;
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct runs_tree *run = &ni->file.run;
@@ -865,29 +893,27 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	struct mft_inode *mi, *mi_b;
	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end;
	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
	unsigned fr;
	u64 total_size;
	u64 total_size;
	u32 clst_per_frame;
	bool ok;


	if (new)
	if (new)
		*new = false;
		*new = false;


	/* Try to find in cache. */
	down_read(&ni->file.run_lock);
	down_read(&ni->file.run_lock);
	ok = run_lookup_entry(run, vcn, lcn, len, NULL);
	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
		*len = 0;
	up_read(&ni->file.run_lock);
	up_read(&ni->file.run_lock);


	if (ok && (*lcn != SPARSE_LCN || !new)) {
	if (*len) {
		/* Normal way. */
		if (*lcn != SPARSE_LCN || !new)
		return 0;
			return 0; /* Fast normal way without allocation. */
	}
		else if (clen > *len)

	if (!clen)
		clen = 1;

	if (ok && clen > *len)
			clen = *len;
			clen = *len;
	}


	/* No cluster in cache or we need to allocate cluster in hole. */
	sbi = ni->mi.sbi;
	sbi = ni->mi.sbi;
	cluster_bits = sbi->cluster_bits;
	cluster_bits = sbi->cluster_bits;


@@ -913,12 +939,6 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
		goto out;
		goto out;
	}
	}


	clst_per_frame = 1u << attr_b->nres.c_unit;
	to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1);

	if (vcn + to_alloc > asize)
		to_alloc = asize - vcn;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;


@@ -937,36 +957,68 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}
	}


	/* Load in cache actual information. */
	err = attr_load_runs(attr, ni, run, NULL);
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
	if (err)
		goto out;
		goto out;


	if (!ok) {
	if (!*len) {
		ok = run_lookup_entry(run, vcn, lcn, len, NULL);
		if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
		if (ok && (*lcn != SPARSE_LCN || !new)) {
			if (*lcn != SPARSE_LCN || !new)
			/* Normal way. */
				goto ok; /* Slow normal way without allocation. */
			err = 0;
			goto ok;
		}

		if (!ok && !new) {
			*len = 0;
			err = 0;
			goto ok;
		}


		if (ok && clen > *len) {
			if (clen > *len)
				clen = *len;
				clen = *len;
			to_alloc = (clen + clst_per_frame - 1) &
		} else if (!new) {
				   ~(clst_per_frame - 1);
			/* Here we may return -ENOENT.
			 * In any case caller gets zero length. */
			goto ok;
		}
		}
	}
	}


	if (!is_attr_ext(attr_b)) {
	if (!is_attr_ext(attr_b)) {
		/* The code below only for sparsed or compressed attributes. */
		err = -EINVAL;
		err = -EINVAL;
		goto out;
		goto out;
	}
	}


	vcn0 = vcn;
	to_alloc = clen;
	fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1;
	/* Allocate frame aligned clusters.
	 * ntfs.sys usually uses 16 clusters per frame for sparsed or compressed.
	 * ntfs3 uses 1 cluster per frame for newly created sparse files. */
	if (attr_b->nres.c_unit) {
		CLST clst_per_frame = 1u << attr_b->nres.c_unit;
		CLST cmask = ~(clst_per_frame - 1);

		/* Get frame aligned vcn and to_alloc. */
		vcn = vcn0 & cmask;
		to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn;
		if (fr < clst_per_frame)
			fr = clst_per_frame;
		zero = true;

		/* Check if 'vcn' and 'vcn0' in different attribute segments. */
		if (vcn < svcn || evcn1 <= vcn) {
			/* Load attribute for truncated vcn. */
			attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0,
					    &vcn, &mi);
			if (!attr) {
				err = -EINVAL;
				goto out;
			}
			svcn = le64_to_cpu(attr->nres.svcn);
			evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
			err = attr_load_runs(attr, ni, run, NULL);
			if (err)
				goto out;
		}
	}

	if (vcn + to_alloc > asize)
		to_alloc = asize - vcn;

	/* Get the last LCN to allocate from. */
	/* Get the last LCN to allocate from. */
	hint = 0;
	hint = 0;


@@ -980,18 +1032,33 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
		hint = -1;
		hint = -1;
	}
	}


	err = attr_allocate_clusters(
	/* Allocate and zeroout new clusters. */
		sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len,
	err = attr_allocate_clusters(sbi, run, vcn, hint + 1, to_alloc, NULL,
		(sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1,
				     zero ? ALLOCATE_ZERO : ALLOCATE_DEF, &alen,
		lcn);
				     fr, lcn, len);
	if (err)
	if (err)
		goto out;
		goto out;
	*new = true;
	*new = true;


	end = vcn + *len;
	end = vcn + alen;

	total_size = le64_to_cpu(attr_b->nres.total_size) +
	total_size = le64_to_cpu(attr_b->nres.total_size) +
		     ((u64)*len << cluster_bits);
		     ((u64)alen << cluster_bits);

	if (vcn != vcn0) {
		if (!run_lookup_entry(run, vcn0, lcn, len, NULL)) {
			err = -EINVAL;
			goto out;
		}
		if (*lcn == SPARSE_LCN) {
			/* Internal error. Should not happen. */
			WARN_ON(1);
			err = -EINVAL;
			goto out;
		}
		/* Check the case when vcn0 + len overlaps newly allocated clusters. */
		if (vcn0 + *len > end)
			*len = end - vcn0;
	}


repack:
repack:
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
@@ -1516,7 +1583,7 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, next_svcn, lcn, len;
	CLST svcn, evcn1, next_svcn, len;
	CLST vcn, end, clst_data;
	CLST vcn, end, clst_data;
	u64 total_size, valid_size, data_size;
	u64 total_size, valid_size, data_size;


@@ -1592,8 +1659,9 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
		}
		}


		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
					     hint + 1, len - clst_data, NULL, 0,
					     hint + 1, len - clst_data, NULL,
					     &alen, 0, &lcn);
					     ALLOCATE_DEF, &alen, 0, NULL,
					     NULL);
		if (err)
		if (err)
			goto out;
			goto out;


+33 −113
Original line number Original line Diff line number Diff line
@@ -122,8 +122,8 @@ static int ntfs_extend_initialized_size(struct file *file,
			bits = sbi->cluster_bits;
			bits = sbi->cluster_bits;
			vcn = pos >> bits;
			vcn = pos >> bits;


			err = attr_data_get_block(ni, vcn, 0, &lcn, &clen,
			err = attr_data_get_block(ni, vcn, 1, &lcn, &clen, NULL,
						  NULL);
						  false);
			if (err)
			if (err)
				goto out;
				goto out;


@@ -180,17 +180,17 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
	struct address_space *mapping = inode->i_mapping;
	struct address_space *mapping = inode->i_mapping;
	u32 blocksize = 1 << inode->i_blkbits;
	u32 blocksize = 1 << inode->i_blkbits;
	pgoff_t idx = vbo >> PAGE_SHIFT;
	pgoff_t idx = vbo >> PAGE_SHIFT;
	u32 z_start = vbo & (PAGE_SIZE - 1);
	u32 from = vbo & (PAGE_SIZE - 1);
	pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
	loff_t page_off;
	loff_t page_off;
	struct buffer_head *head, *bh;
	struct buffer_head *head, *bh;
	u32 bh_next, bh_off, z_end;
	u32 bh_next, bh_off, to;
	sector_t iblock;
	sector_t iblock;
	struct page *page;
	struct page *page;


	for (; idx < idx_end; idx += 1, z_start = 0) {
	for (; idx < idx_end; idx += 1, from = 0) {
		page_off = (loff_t)idx << PAGE_SHIFT;
		page_off = (loff_t)idx << PAGE_SHIFT;
		z_end = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off)
		to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off)
						     : PAGE_SIZE;
						     : PAGE_SIZE;
		iblock = page_off >> inode->i_blkbits;
		iblock = page_off >> inode->i_blkbits;


@@ -208,7 +208,7 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
		do {
		do {
			bh_next = bh_off + blocksize;
			bh_next = bh_off + blocksize;


			if (bh_next <= z_start || bh_off >= z_end)
			if (bh_next <= from || bh_off >= to)
				continue;
				continue;


			if (!buffer_mapped(bh)) {
			if (!buffer_mapped(bh)) {
@@ -242,7 +242,7 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
		} while (bh_off = bh_next, iblock += 1,
		} while (bh_off = bh_next, iblock += 1,
			 head != (bh = bh->b_this_page));
			 head != (bh = bh->b_this_page));


		zero_user_segment(page, z_start, z_end);
		zero_user_segment(page, from, to);


		unlock_page(page);
		unlock_page(page);
		put_page(page);
		put_page(page);
@@ -253,80 +253,6 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
	return err;
	return err;
}
}


/*
 * ntfs_sparse_cluster - Helper function to zero a new allocated clusters.
 *
 * NOTE: 512 <= cluster size <= 2M
 */
void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
			 CLST len)
{
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	u8 cluster_bits = sbi->cluster_bits;
	u64 vbo = (u64)vcn << cluster_bits;
	u64 bytes = (u64)len << cluster_bits;
	u32 blocksize = 1 << inode->i_blkbits;
	pgoff_t idx0 = page0 ? page0->index : -1;
	loff_t vbo_clst = vbo & sbi->cluster_mask_inv;
	loff_t end = ntfs_up_cluster(sbi, vbo + bytes);
	pgoff_t idx = vbo_clst >> PAGE_SHIFT;
	u32 from = vbo_clst & (PAGE_SIZE - 1);
	pgoff_t idx_end = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	loff_t page_off;
	u32 to;
	bool partial;
	struct page *page;

	for (; idx < idx_end; idx += 1, from = 0) {
		page = idx == idx0 ? page0 : grab_cache_page(mapping, idx);

		if (!page)
			continue;

		page_off = (loff_t)idx << PAGE_SHIFT;
		to = (page_off + PAGE_SIZE) > end ? (end - page_off)
						  : PAGE_SIZE;
		partial = false;

		if ((from || PAGE_SIZE != to) &&
		    likely(!page_has_buffers(page))) {
			create_empty_buffers(page, blocksize, 0);
		}

		if (page_has_buffers(page)) {
			struct buffer_head *head, *bh;
			u32 bh_off = 0;

			bh = head = page_buffers(page);
			do {
				u32 bh_next = bh_off + blocksize;

				if (from <= bh_off && bh_next <= to) {
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
				} else if (!buffer_uptodate(bh)) {
					partial = true;
				}
				bh_off = bh_next;
			} while (head != (bh = bh->b_this_page));
		}

		zero_user_segment(page, from, to);

		if (!partial)
			SetPageUptodate(page);
		flush_dcache_page(page);
		set_page_dirty(page);

		if (idx != idx0) {
			unlock_page(page);
			put_page(page);
		}
		cond_resched();
	}
}

/*
/*
 * ntfs_file_mmap - file_operations::mmap
 * ntfs_file_mmap - file_operations::mmap
 */
 */
@@ -368,13 +294,9 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)


			for (; vcn < end; vcn += len) {
			for (; vcn < end; vcn += len) {
				err = attr_data_get_block(ni, vcn, 1, &lcn,
				err = attr_data_get_block(ni, vcn, 1, &lcn,
							  &len, &new);
							  &len, &new, true);
				if (err)
				if (err)
					goto out;
					goto out;

				if (!new)
					continue;
				ntfs_sparse_cluster(inode, NULL, vcn, 1);
			}
			}
		}
		}


@@ -518,7 +440,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct ntfs_inode *ni = ntfs_i(inode);
	struct ntfs_inode *ni = ntfs_i(inode);
	loff_t end = vbo + len;
	loff_t end = vbo + len;
	loff_t vbo_down = round_down(vbo, PAGE_SIZE);
	loff_t vbo_down = round_down(vbo, max_t(unsigned long,
						sbi->cluster_size, PAGE_SIZE));
	bool is_supported_holes = is_sparsed(ni) || is_compressed(ni);
	bool is_supported_holes = is_sparsed(ni) || is_compressed(ni);
	loff_t i_size, new_size;
	loff_t i_size, new_size;
	bool map_locked;
	bool map_locked;
@@ -571,7 +494,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
		u32 frame_size;
		u32 frame_size;
		loff_t mask, vbo_a, end_a, tmp;
		loff_t mask, vbo_a, end_a, tmp;


		err = filemap_write_and_wait_range(mapping, vbo, LLONG_MAX);
		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
		if (err)
			goto out;
			goto out;


@@ -672,39 +596,35 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
			goto out;
			goto out;


		if (is_supported_holes) {
		if (is_supported_holes) {
			CLST vcn_v = bytes_to_cluster(sbi, ni->i_valid);
			CLST vcn = vbo >> sbi->cluster_bits;
			CLST vcn = vbo >> sbi->cluster_bits;
			CLST cend = bytes_to_cluster(sbi, end);
			CLST cend = bytes_to_cluster(sbi, end);
			CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
			CLST lcn, clen;
			CLST lcn, clen;
			bool new;
			bool new;


			if (cend_v > cend)
				cend_v = cend;

			/*
			/*
			 * Allocate but do not zero new clusters. (see below comments)
			 * Allocate and zero new clusters.
			 * This breaks security: One can read unused on-disk areas.
			 * Zeroing these clusters may be too long.
			 * Zeroing these clusters may take too long.
			 * Zeroing these clusters may take too long.
			 */
			 */
			for (; vcn < cend; vcn += clen) {
			for (; vcn < cend_v; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend - vcn,
				err = attr_data_get_block(ni, vcn, cend_v - vcn,
							  &lcn, &clen, &new);
							  &lcn, &clen, &new,
							  true);
				if (err)
				if (err)
					goto out;
					goto out;
				if (!new || vcn >= vcn_v)
			}
					continue;

			/*
			/*
				 * Unwritten area.
			 * Allocate but not zero new clusters.
				 * NTFS is not able to store several unwritten areas.
				 * Activate 'ntfs_sparse_cluster' to zero new allocated clusters.
				 *
				 * Dangerous in case:
				 * 1G of sparsed clusters + 1 cluster of data =>
				 * valid_size == 1G + 1 cluster
				 * fallocate(1G) will zero 1G and this can be very long
				 * xfstest 016/086 will fail without 'ntfs_sparse_cluster'.
			 */
			 */
				ntfs_sparse_cluster(inode, NULL, vcn,
			for (; vcn < cend; vcn += clen) {
						    min(vcn_v - vcn, clen));
				err = attr_data_get_block(ni, vcn, cend - vcn,
							  &lcn, &clen, &new,
							  false);
				if (err)
					goto out;
			}
			}
		}
		}


@@ -925,8 +845,8 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
		frame_vbo = valid & ~(frame_size - 1);
		frame_vbo = valid & ~(frame_size - 1);
		off = valid & (frame_size - 1);
		off = valid & (frame_size - 1);


		err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 0, &lcn,
		err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 1, &lcn,
					  &clen, NULL);
					  &clen, NULL, false);
		if (err)
		if (err)
			goto out;
			goto out;


+1 −1
Original line number Original line Diff line number Diff line
@@ -2224,7 +2224,7 @@ int ni_decompress_file(struct ntfs_inode *ni)


		for (vcn = vbo >> sbi->cluster_bits; vcn < end; vcn += clen) {
		for (vcn = vbo >> sbi->cluster_bits; vcn < end; vcn += clen) {
			err = attr_data_get_block(ni, vcn, cend - vcn, &lcn,
			err = attr_data_get_block(ni, vcn, cend - vcn, &lcn,
						  &clen, &new);
						  &clen, &new, false);
			if (err)
			if (err)
				goto out;
				goto out;
		}
		}
+2 −2
Original line number Original line Diff line number Diff line
@@ -1347,8 +1347,8 @@ static int indx_create_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,


	run_init(&run);
	run_init(&run);


	err = attr_allocate_clusters(sbi, &run, 0, 0, len, NULL, 0, &alen, 0,
	err = attr_allocate_clusters(sbi, &run, 0, 0, len, NULL, ALLOCATE_DEF,
				     NULL);
				     &alen, 0, NULL, NULL);
	if (err)
	if (err)
		goto out;
		goto out;


+5 −7
Original line number Original line Diff line number Diff line
@@ -577,7 +577,8 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
	off = vbo & sbi->cluster_mask;
	off = vbo & sbi->cluster_mask;
	new = false;
	new = false;


	err = attr_data_get_block(ni, vcn, 1, &lcn, &len, create ? &new : NULL);
	err = attr_data_get_block(ni, vcn, 1, &lcn, &len, create ? &new : NULL,
				  create && sbi->cluster_size > PAGE_SIZE);
	if (err)
	if (err)
		goto out;
		goto out;


@@ -595,11 +596,8 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
		WARN_ON(1);
		WARN_ON(1);
	}
	}


	if (new) {
	if (new)
		set_buffer_new(bh);
		set_buffer_new(bh);
		if ((len << cluster_bits) > block_size)
			ntfs_sparse_cluster(inode, page, vcn, len);
	}


	lbo = ((u64)lcn << cluster_bits) + off;
	lbo = ((u64)lcn << cluster_bits) + off;


@@ -1537,8 +1535,8 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
				cpu_to_le64(ntfs_up_cluster(sbi, nsize));
				cpu_to_le64(ntfs_up_cluster(sbi, nsize));


			err = attr_allocate_clusters(sbi, &ni->file.run, 0, 0,
			err = attr_allocate_clusters(sbi, &ni->file.run, 0, 0,
						     clst, NULL, 0, &alen, 0,
						     clst, NULL, ALLOCATE_DEF,
						     NULL);
						     &alen, 0, NULL, NULL);
			if (err)
			if (err)
				goto out5;
				goto out5;


Loading