Unverified Commit d1d14cfb authored by openeuler-ci-bot, committed by Gitee
Browse files

!13350 mm: replace xa_get_order with xas_get_order where appropriate

Merge Pull Request from: @wedm23414 
 
mainline inclusion
from mainline-v6.12-rc1
commit 354a595a4a4d9dfc0d3e5703c6c5520e6c2f52d8
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IB5CID

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=354a595a4a4d9dfc0d3e5703c6c5520e6c2f52d8
--------------------------------

The tracing of invalidation and truncation operations on large files
showed that xa_get_order() is among the top functions where the kernel
spends a lot of CPU time.  xa_get_order() needs to traverse the tree to
reach the right node for a given index and then extract the order of the
entry.  However, it seems that at many places it is being called within
an already happening tree traversal where there is no need to do another
traversal.  Just use xas_get_order() at those places.

Shakeel Butt (1):
  mm: replace xa_get_order with xas_get_order where appropriate 

-- 
2.34.1
 
 
Link: https://gitee.com/openeuler/kernel/pulls/13350

 

Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Zhang Peng <zhangpeng362@huawei.com>
parents 583c2366 69855dbd
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -2113,7 +2113,7 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
			VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
					folio);
		} else {
			nr = 1 << xa_get_order(&mapping->i_pages, xas.xa_index);
			nr = 1 << xas_get_order(&xas);
			base = xas.xa_index & ~(nr - 1);
			/* Omit order>0 value which begins before the start */
			if (base < *start)
@@ -3039,7 +3039,7 @@ static inline loff_t folio_seek_hole_data(struct xa_state *xas,
static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio)
{
	if (xa_is_value(folio))
		return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index);
		return PAGE_SIZE << xas_get_order(xas);
	return folio_size(folio);
}

@@ -4304,7 +4304,7 @@ static void filemap_cachestat(struct address_space *mapping,
		if (xas_retry(&xas, folio))
			continue;

		order = xa_get_order(xas.xa, xas.xa_index);
		order = xas_get_order(&xas);
		nr_pages = 1 << order;
		folio_first_index = round_down(xas.xa_index, 1 << order);
		folio_last_index = folio_first_index + nr_pages - 1;
+1 −1
Original line number Diff line number Diff line
@@ -891,7 +891,7 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping,
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			swapped += 1 << xa_get_order(xas.xa, xas.xa_index);
			swapped += 1 << xas_get_order(&xas);
		if (xas.xa_index == max)
			break;
		if (need_resched()) {