Unverified Commit aa30eccb authored by Konstantin Komarov

fs/ntfs3: Fallocate (FALLOC_FL_INSERT_RANGE) implementation



Add functions for inserting a hole in a file and inserting a range in a run.

Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
parent f759942b
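For context, fallocate(2) with FALLOC_FL_INSERT_RANGE is the userspace entry point this helper is written for. A minimal sketch of such a call (the mount point and file name are assumptions; the offset and length must be cluster aligned, or frame aligned for compressed files, and the offset must lie inside the current file size, otherwise the kernel returns -EINVAL):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/falloc.h>

int main(void)
{
	/* Assumed path: any regular file on an ntfs3 mount, larger than 1 MiB. */
	int fd = open("/mnt/ntfs/file.bin", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Insert a 1 MiB hole at offset 1 MiB; both are multiples of the cluster size. */
	if (fallocate(fd, FALLOC_FL_INSERT_RANGE, 1 << 20, 1 << 20)) {
		perror("fallocate");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}

Everything after the insertion point is shifted up by the requested length, and the file size grows by the same amount.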
fs/ntfs3/attrib.c +176 −0
@@ -2081,3 +2081,179 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)

	return err;
}

/*
 * attr_insert_range - Insert a range (hole) into a file.
 *
 * Not for normal files: only sparse/compressed attributes are supported
 * (see the is_attr_ext() check below).
 */
int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST vcn, svcn, evcn1, len, next_svcn;
	u64 data_size, alloc_size;
	u32 mask;
	__le16 a_flags;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!is_attr_ext(attr_b)) {
		/* It was checked above. See fallocate. */
		return -EOPNOTSUPP;
	}

	if (!attr_b->non_res) {
		data_size = le32_to_cpu(attr_b->res.data_size);
		mask = sbi->cluster_mask; /* cluster_size - 1 */
	} else {
		data_size = le64_to_cpu(attr_b->nres.data_size);
		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
	}

	if (vbo > data_size) {
		/* Inserting a range beyond the end of the file is not allowed. */
		return -EINVAL;
	}

	if ((vbo & mask) || (bytes & mask)) {
		/* Only frame-aligned ranges may be inserted. */
		return -EINVAL;
	}

	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;

	down_write(&ni->file.run_lock);

	if (!attr_b->non_res) {
		err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
				    data_size + bytes, NULL, false, &attr);
		if (err)
			goto out;
		if (!attr->non_res) {
			/*
			 * Still resident: vbo must be 0 here, so shift the
			 * old contents up and zero the inserted hole.
			 */
			char *data = Add2Ptr(attr, le16_to_cpu(attr->res.data_off));

			memmove(data + bytes, data, data_size);
			memset(data, 0, bytes);
			err = 0;
			goto out;
		}
		/* The resident attribute has become nonresident. */
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			/* goto out, not return: run_lock is held. */
			err = -ENOENT;
			goto out;
		}
		if (!attr_b->non_res) {
			err = -EINVAL;
			goto out;
		}
		data_size = le64_to_cpu(attr_b->nres.data_size);
		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	}

	/*
	 * Enumerate all attribute segments and shift start vcn.
	 */
	a_flags = attr_b->flags;
	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	run_truncate(run, 0); /* clear cached values. */
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (!run_insert_range(run, vcn, len)) {
		err = -ENOMEM;
		goto out;
	}

	/* Try to pack as many runs as possible into the current record. */
	err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
	if (err)
		goto out;

	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
	run_truncate_head(run, next_svcn);

	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
	       attr->type == ATTR_DATA && !attr->name_len) {
		le64_add_cpu(&attr->nres.svcn, len);
		le64_add_cpu(&attr->nres.evcn, len);
		if (le) {
			le->vcn = attr->nres.svcn;
			ni->attr_list.dirty = true;
		}
		mi->dirty = true;
	}

	/*
	 * Update the primary attribute segment in advance:
	 * the attr_b pointer may become invalid once the MFT layout
	 * changes (e.g. in ni_insert_nonresident() below).
	 */
	if (vbo <= ni->i_valid)
		ni->i_valid += bytes;

	attr_b->nres.data_size = cpu_to_le64(data_size + bytes);
	attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes);

	/* ni->i_valid may temporarily differ from the on-disk valid_size. */
	if (ni->i_valid > data_size + bytes)
		attr_b->nres.valid_size = attr_b->nres.data_size;
	else
		attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
	mi_b->dirty = true;

	if (next_svcn < evcn1 + len) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 + len - next_svcn,
					    a_flags, NULL, NULL);
		if (err)
			goto out;
	}

	ni->vfs_inode.i_size += bytes;
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	run_truncate(run, 0); /* clear cached values. */

	up_write(&ni->file.run_lock);
	if (err)
		make_bad_inode(&ni->vfs_inode);

	return err;
}
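A rough worked example of the alignment rule enforced above, using assumed values (4 KiB clusters and the typical compression unit of c_unit == 4, i.e. 16-cluster frames); the names and numbers here are illustrative only:

#include <stdio.h>

int main(void)
{
	unsigned long cluster_size = 4096;	/* assumed volume cluster size */
	unsigned long c_unit = 4;		/* typical compression unit: 16 clusters */

	unsigned long mask_plain = cluster_size - 1;			/* sparse file: 4 KiB alignment */
	unsigned long mask_frame = (cluster_size << c_unit) - 1;	/* compressed file: 64 KiB alignment */

	unsigned long long vbo = 1 << 20;	/* insert at 1 MiB */
	unsigned long long bytes = 8192;	/* two clusters */

	/* Mirrors the "(vbo & mask) || (bytes & mask)" rejection above. */
	printf("sparse file accepted: %d\n", !((vbo & mask_plain) || (bytes & mask_plain)));
	printf("compressed file accepted: %d\n", !((vbo & mask_frame) || (bytes & mask_frame)));
	return 0;
}

With these numbers the range passes for a plain sparse file but is rejected for a compressed one, because 8192 bytes is not a whole 64 KiB frame.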
fs/ntfs3/ntfs_fs.h +3 −1
@@ -440,6 +440,7 @@ int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
			u64 new_valid);
int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size);

/* Functions from attrlist.c */
@@ -775,10 +776,11 @@ bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
void run_truncate(struct runs_tree *run, CLST vcn);
void run_truncate_head(struct runs_tree *run, CLST vcn);
void run_truncate_around(struct runs_tree *run, CLST vcn);
bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *Index);
bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index);
bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
		   bool is_mft);
bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len);
bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len);
bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
		   CLST *lcn, CLST *len);
bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn);
fs/ntfs3/run.c +43 −0
@@ -547,6 +547,49 @@ bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
	return true;
}

/*
 * run_insert_range
 *
 * Helper for attr_insert_range(),
 * which is a helper for fallocate(FALLOC_FL_INSERT_RANGE).
 */
bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len)
{
	size_t index;
	struct ntfs_run *r, *e;

	if (WARN_ON(!run_lookup(run, vcn, &index)))
		return false; /* Should never be here. */

	e = run->runs + run->count;
	r = run->runs + index;

	if (vcn > r->vcn)
		r += 1;

	for (; r < e; r++)
		r->vcn += len;

	r = run->runs + index;

	if (vcn > r->vcn) {
		/* Split the fragment that contains vcn. */
		CLST len1 = vcn - r->vcn;
		CLST len2 = r->len - len1;
		CLST lcn2 = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + len1);

		r->len = len1;

		if (!run_add_entry(run, vcn + len, lcn2, len2, false))
			return false;
	}

	if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
		return false;

	return true;
}
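To make the shift-and-split logic above easier to follow, here is a small self-contained sketch of the same idea. The simplified struct, the HOLE marker and insert_hole() are hypothetical names for illustration, not the kernel's struct runs_tree or run_add_entry(); in particular, the kernel keeps its run array sorted, which this toy version does not bother with:

#include <stdio.h>

/* Hypothetical simplified run: clusters [vcn, vcn + len) mapped to lcn. */
struct simple_run {
	unsigned long long vcn, lcn, len;
};

#define HOLE (~0ULL)	/* stand-in for SPARSE_LCN */

/*
 * Insert a hole of 'len' clusters at 'vcn': runs entirely above the
 * insertion point are shifted up, the run containing 'vcn' (if any) is
 * split, and the hole itself is appended.  Returns the new run count.
 */
static int insert_hole(struct simple_run *r, int count,
		       unsigned long long vcn, unsigned long long len)
{
	int i, orig = count;

	for (i = 0; i < orig; i++) {
		if (vcn > r[i].vcn && vcn < r[i].vcn + r[i].len) {
			/* Split: keep the head in place, append the shifted tail. */
			unsigned long long head = vcn - r[i].vcn;

			r[count].vcn = vcn + len;
			r[count].lcn = r[i].lcn == HOLE ? HOLE : r[i].lcn + head;
			r[count].len = r[i].len - head;
			r[i].len = head;
			count++;
		} else if (r[i].vcn >= vcn) {
			r[i].vcn += len;	/* Whole run lies above the insertion point. */
		}
	}

	/* The inserted range itself becomes a sparse (hole) run. */
	r[count].vcn = vcn;
	r[count].lcn = HOLE;
	r[count].len = len;
	return count + 1;
}

int main(void)
{
	/* One mapped run: clusters 0..9 at lcn 100.  Insert a 4-cluster hole at vcn 4. */
	struct simple_run r[8] = { { 0, 100, 10 } };
	int i, n = insert_hole(r, 1, 4, 4);

	for (i = 0; i < n; i++)
		printf("vcn %llu len %llu lcn %lld\n",
		       r[i].vcn, r[i].len, (long long)r[i].lcn);	/* lcn -1 means hole */
	return 0;
}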

/*
 * run_get_entry - Return index-th mapped region.
 */