Commit 814b1860 authored by Fuad Tabba, committed by Will Deacon
Browse files

arm64: __flush_dcache_area to take end parameter instead of size



To be consistent with other functions with similar names and
functionality in cacheflush.h, cache.S, and cachetlb.rst, change
to specify the range in terms of start and end, as opposed to
start and size.

No functional change intended.

Reported-by: Will Deacon <will@kernel.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20210524083001.2586635-13-tabba@google.com


Signed-off-by: Will Deacon <will@kernel.org>
parent 163d3f80
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -124,7 +124,8 @@ static inline u32 gic_read_rpr(void)
#define gic_read_lpir(c)		readq_relaxed(c)
#define gic_write_lpir(v, c)		writeq_relaxed(v, c)

#define gic_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))
#define gic_flush_dcache_to_poc(a,l)	\
	__flush_dcache_area((unsigned long)(a), (unsigned long)(a)+(l))

#define gits_read_baser(c)		readq_relaxed(c)
#define gits_write_baser(v, c)		writeq_relaxed(v, c)
+4 −4
Original line number Diff line number Diff line
@@ -50,15 +50,15 @@
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	__flush_dcache_area(kaddr, size)
 *	__flush_dcache_area(start, end)
 *
 *		Ensure that the data held in page is written back.
 *		- kaddr  - page address
 *		- size   - region size
 *		- start  - virtual start address
 *		- end    - virtual end address
 */
extern void __flush_icache_range(unsigned long start, unsigned long end);
extern void invalidate_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
extern void __flush_dcache_area(unsigned long start, unsigned long end);
extern void __inval_dcache_area(unsigned long start, unsigned long end);
extern void __clean_dcache_area_poc(void *addr, size_t len);
extern void __clean_dcache_area_pop(void *addr, size_t len);
+1 −1
Original line number Diff line number Diff line
@@ -137,7 +137,7 @@ void efi_virtmap_unload(void);

static inline void efi_capsule_flush_cache_range(void *addr, int size)
{
	__flush_dcache_area(addr, size);
	__flush_dcache_area((unsigned long)addr, (unsigned long)addr + size);
}

#endif /* _ASM_EFI_H */
+2 −1
Original line number Diff line number Diff line
@@ -180,7 +180,8 @@ static inline void *__kvm_vector_slot2addr(void *base,

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))
#define kvm_flush_dcache_to_poc(a,l)	\
	__flush_dcache_area((unsigned long)(a), (unsigned long)(a)+(l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
+11 −7
Original line number Diff line number Diff line
@@ -240,8 +240,6 @@ static int create_safe_exec_page(void *src_start, size_t length,
	return 0;
}

#define dcache_clean_range(start, end)	__flush_dcache_area(start, (end - start))

#ifdef CONFIG_ARM64_MTE

static DEFINE_XARRAY(mte_pages);
@@ -383,13 +381,18 @@ int swsusp_arch_suspend(void)
		ret = swsusp_save();
	} else {
		/* Clean kernel core startup/idle code to PoC*/
		dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
		dcache_clean_range(__idmap_text_start, __idmap_text_end);
		__flush_dcache_area((unsigned long)__mmuoff_data_start,
				    (unsigned long)__mmuoff_data_end);
		__flush_dcache_area((unsigned long)__idmap_text_start,
				    (unsigned long)__idmap_text_end);

		/* Clean kvm setup code to PoC? */
		if (el2_reset_needed()) {
			dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
			dcache_clean_range(__hyp_text_start, __hyp_text_end);
			__flush_dcache_area(
				(unsigned long)__hyp_idmap_text_start,
				(unsigned long)__hyp_idmap_text_end);
			__flush_dcache_area((unsigned long)__hyp_text_start,
					    (unsigned long)__hyp_text_end);
		}

		swsusp_mte_restore_tags();
@@ -474,7 +477,8 @@ int swsusp_arch_resume(void)
	 * The hibernate exit text contains a set of el2 vectors, that will
	 * be executed at el2 with the mmu off in order to reload hyp-stub.
	 */
	__flush_dcache_area(hibernate_exit, exit_size);
	__flush_dcache_area((unsigned long)hibernate_exit,
			    (unsigned long)hibernate_exit + exit_size);

	/*
	 * KASLR will cause the el2 vectors to be in a different location in
Loading