Commit f9bff0e3 authored by Matthew Wilcox (Oracle), committed by Andrew Morton
Browse files

minmax: add in_range() macro

Patch series "New page table range API", v6.

This patchset changes the API used by the MM to set up page table entries.
The four APIs are:

    set_ptes(mm, addr, ptep, pte, nr)
    update_mmu_cache_range(vma, addr, ptep, nr)
    flush_dcache_folio(folio) 
    flush_icache_pages(vma, page, nr)

flush_dcache_folio() isn't technically new, but no architecture
implemented it, so I've done that for them.  The old APIs remain around
but are mostly implemented by calling the new interfaces.

The new APIs are based around setting up N page table entries at once. 
The N entries belong to the same PMD, the same folio and the same VMA, so
ptep++ is a legitimate operation, and locking is taken care of for you. 
Some architectures can do a better job of it than just a loop, but I have
hesitated to make too deep a change to architectures I don't understand
well.

One thing I have changed in every architecture is that PG_arch_1 is now a
per-folio bit instead of a per-page bit when used for dcache clean/dirty
tracking.  This was something that would have to happen eventually, and it
makes sense to do it now rather than iterate over every page involved in a
cache flush and figure out if it needs to happen.

The point of all this is better performance, and Fengwei Yin has measured
improvement on x86.  I suspect you'll see improvement on your architecture
too.  Try the new will-it-scale test mentioned here:
https://lore.kernel.org/linux-mm/20230206140639.538867-5-fengwei.yin@intel.com/
You'll need to run it on an XFS filesystem and have
CONFIG_TRANSPARENT_HUGEPAGE set.

This patchset is the basis for much of the anonymous large folio work
being done by Ryan, so it's received quite a lot of testing over the last
few months.


This patch (of 38):

Determine if a value lies within a range more efficiently (subtraction +
comparison vs two comparisons and an AND).  It also has useful (under some
circumstances) behaviour if the range exceeds the maximum value of the
type.  Convert all the conflicting definitions of in_range() within the
kernel; some can use the generic definition while others need their own
definition.

Link: https://lkml.kernel.org/r/20230802151406.3735276-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20230802151406.3735276-2-willy@infradead.org


Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent f82e6bf9
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -25,7 +25,7 @@ static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
	return 0;
}

static bool in_range(unsigned long start, unsigned long size,
static bool range_in_range(unsigned long start, unsigned long size,
	unsigned long range_start, unsigned long range_end)
{
	return start >= range_start && start < range_end &&
@@ -63,8 +63,8 @@ static int change_memory_common(unsigned long addr, int numpages,
	if (!size)
		return 0;

	if (!in_range(start, size, MODULES_VADDR, MODULES_END) &&
	    !in_range(start, size, VMALLOC_START, VMALLOC_END))
	if (!range_in_range(start, size, MODULES_VADDR, MODULES_END) &&
	    !range_in_range(start, size, VMALLOC_START, VMALLOC_END))
		return -EINVAL;

	return __change_memory_common(start, size, set_mask, clear_mask);
+1 −1
Original line number Diff line number Diff line
@@ -35,7 +35,7 @@ static inline void set_range(struct malidp_range *rg, u32 start, u32 end)
	rg->end   = end;
}

static inline bool in_range(struct malidp_range *rg, u32 v)
static inline bool malidp_in_range(struct malidp_range *rg, u32 v)
{
	return (v >= rg->start) && (v <= rg->end);
}
+12 −12
Original line number Diff line number Diff line
@@ -305,12 +305,12 @@ komeda_layer_check_cfg(struct komeda_layer *layer,
	if (komeda_fb_check_src_coords(kfb, src_x, src_y, src_w, src_h))
		return -EINVAL;

	if (!in_range(&layer->hsize_in, src_w)) {
	if (!malidp_in_range(&layer->hsize_in, src_w)) {
		DRM_DEBUG_ATOMIC("invalidate src_w %d.\n", src_w);
		return -EINVAL;
	}

	if (!in_range(&layer->vsize_in, src_h)) {
	if (!malidp_in_range(&layer->vsize_in, src_h)) {
		DRM_DEBUG_ATOMIC("invalidate src_h %d.\n", src_h);
		return -EINVAL;
	}
@@ -452,14 +452,14 @@ komeda_scaler_check_cfg(struct komeda_scaler *scaler,
	hsize_out = dflow->out_w;
	vsize_out = dflow->out_h;

	if (!in_range(&scaler->hsize, hsize_in) ||
	    !in_range(&scaler->hsize, hsize_out)) {
	if (!malidp_in_range(&scaler->hsize, hsize_in) ||
	    !malidp_in_range(&scaler->hsize, hsize_out)) {
		DRM_DEBUG_ATOMIC("Invalid horizontal sizes");
		return -EINVAL;
	}

	if (!in_range(&scaler->vsize, vsize_in) ||
	    !in_range(&scaler->vsize, vsize_out)) {
	if (!malidp_in_range(&scaler->vsize, vsize_in) ||
	    !malidp_in_range(&scaler->vsize, vsize_out)) {
		DRM_DEBUG_ATOMIC("Invalid vertical sizes");
		return -EINVAL;
	}
@@ -574,13 +574,13 @@ komeda_splitter_validate(struct komeda_splitter *splitter,
		return -EINVAL;
	}

	if (!in_range(&splitter->hsize, dflow->in_w)) {
	if (!malidp_in_range(&splitter->hsize, dflow->in_w)) {
		DRM_DEBUG_ATOMIC("split in_w:%d is out of the acceptable range.\n",
				 dflow->in_w);
		return -EINVAL;
	}

	if (!in_range(&splitter->vsize, dflow->in_h)) {
	if (!malidp_in_range(&splitter->vsize, dflow->in_h)) {
		DRM_DEBUG_ATOMIC("split in_h: %d exceeds the acceptable range.\n",
				 dflow->in_h);
		return -EINVAL;
@@ -624,13 +624,13 @@ komeda_merger_validate(struct komeda_merger *merger,
		return -EINVAL;
	}

	if (!in_range(&merger->hsize_merged, output->out_w)) {
	if (!malidp_in_range(&merger->hsize_merged, output->out_w)) {
		DRM_DEBUG_ATOMIC("merged_w: %d is out of the accepted range.\n",
				 output->out_w);
		return -EINVAL;
	}

	if (!in_range(&merger->vsize_merged, output->out_h)) {
	if (!malidp_in_range(&merger->vsize_merged, output->out_h)) {
		DRM_DEBUG_ATOMIC("merged_h: %d is out of the accepted range.\n",
				 output->out_h);
		return -EINVAL;
@@ -866,8 +866,8 @@ void komeda_complete_data_flow_cfg(struct komeda_layer *layer,
	 * input/output range.
	 */
	if (dflow->en_scaling && scaler)
		dflow->en_split = !in_range(&scaler->hsize, dflow->in_w) ||
				  !in_range(&scaler->hsize, dflow->out_w);
		dflow->en_split = !malidp_in_range(&scaler->hsize, dflow->in_w) ||
				  !malidp_in_range(&scaler->hsize, dflow->out_w);
}

static bool merger_is_available(struct komeda_pipeline *pipe,
+0 −6
Original line number Diff line number Diff line
@@ -676,12 +676,6 @@ struct block_header {
	u32 data[];
};

/* this should be a general kernel helper */
static int in_range(u32 addr, u32 start, u32 size)
{
	return addr >= start && addr < start + size;
}

static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
{
	if (!in_range(blk->addr, bo->iova, bo->size))
+9 −9
Original line number Diff line number Diff line
@@ -2126,7 +2126,7 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
	.set_link_ksettings = set_link_ksettings,
};

static int in_range(int val, int lo, int hi)
static int cxgb_in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}
@@ -2162,19 +2162,19 @@ static int cxgb_siocdevprivate(struct net_device *dev,
			return -EINVAL;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t.cong_thres, 0, 255) ||
		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
		if (!cxgb_in_range(t.intr_lat, 0, M_NEWTIMER) ||
		    !cxgb_in_range(t.cong_thres, 0, 255) ||
		    !cxgb_in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
		    !cxgb_in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
		    !cxgb_in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			      MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
		    !cxgb_in_range(t.fl_size[0], MIN_FL_ENTRIES,
			      MAX_RX_BUFFERS) ||
		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
		    !cxgb_in_range(t.fl_size[1], MIN_FL_ENTRIES,
			      MAX_RX_JUMBO_BUFFERS) ||
		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
		    !cxgb_in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
			      MAX_RSPQ_ENTRIES))
			return -EINVAL;

Loading